Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-30 08:42:00 +00:00
	llama : make model stateless and context stateful (llama_state) (#1797)
* llama : make model stateless and context stateful
* llama : minor cleanup
* llama : update internal API declaration
* Apply suggestions from code review; fix style
* Missing model memory release
* Fix style
* Add deprecated warning for public API function llama_init_from_file
* Update public API use cases: move away from deprecated llama_init_from_file
* Deprecate public API function llama_apply_lora_from_file

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
This commit is contained in:
@@ -9,6 +9,7 @@
 #include <random>
 #include <thread>
 #include <unordered_map>
+#include <tuple>

 #if !defined (_WIN32)
 #include <stdio.h>
@@ -95,7 +96,7 @@ std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::s
 // Model utils
 //

-struct llama_context * llama_init_from_gpt_params(const gpt_params & params);
+std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(const gpt_params & params);

 //
 // Console utils
 //
		Reference in New Issue
	
	Block a user
Didzis Gosko