	llama : refactor llama_context, llama_kv_cache, llm_build_context (#12181)
* llama : refactor llama_context, llama_kv_cache, llm_build_context ggml-ci
* graph : don't mutate the KV cache during defrag ggml-ci
* context : reduce virtuals + remove test function ggml-ci
* context : move interface implementation to source file + factory ggml-ci
* graph : move KV cache build functions to llama_context impl ggml-ci
* graph : remove model reference from build_pooling ggml-ci
* graph : remove llama_model reference ggml-ci
* kv_cache : provide rope factors ggml-ci
* graph : rework inputs to use only unique_ptr, remove attn input abstraction ggml-ci
* context : remove llama_context_i abstraction ggml-ci
* context : clean-up ggml-ci
* graph : clean-up ggml-ci
* llama : remove redundant keywords (struct, enum) ggml-ci
* model : adapt gemma3 ggml-ci
* graph : restore same attention ops as on master ggml-ci
* llama : remove TODO + fix indent ggml-ci
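The "rework inputs to use only unique_ptr, remove attn input abstraction" item above describes an ownership pattern for graph input objects. The sketch below only illustrates that general pattern; all type and function names in it are hypothetical and are not the actual llama.cpp internals:

```cpp
// Illustrative sketch of unique_ptr-owned graph inputs (hypothetical names,
// not the real llama.cpp types): the graph owns every input it creates,
// and the builder keeps only non-owning raw pointers to them.
#include <memory>
#include <vector>

struct graph_input {
    virtual ~graph_input() = default;
    virtual void set_input() = 0; // fill the backing tensor from the current batch / KV state
};

struct graph_input_tokens : graph_input {
    void set_input() override {
        // copy token ids into the input tensor (omitted)
    }
};

struct graph_context {
    // single ownership via unique_ptr instead of a shared "attn input" abstraction
    std::vector<std::unique_ptr<graph_input>> inputs;

    graph_input_tokens * add_input_tokens() {
        auto inp = std::make_unique<graph_input_tokens>();
        auto * res = inp.get();
        inputs.push_back(std::move(inp)); // ownership transferred to the graph
        return res;                       // builder keeps a non-owning handle
    }
};

int main() {
    graph_context gf;
    graph_input_tokens * tok = gf.add_input_tokens();
    tok->set_input(); // later, once the batch is known
}
```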
```diff
@@ -891,7 +891,7 @@ static int apply_chat_template(const struct common_chat_templates * tmpls, Llama
 // Function to tokenize the prompt
 static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt,
                            std::vector<llama_token> & prompt_tokens, const LlamaData & llama_data) {
-    const bool is_first = llama_get_kv_cache_used_cells(llama_data.context.get()) == 0;
+    const bool is_first = llama_kv_self_used_cells(llama_data.context.get()) == 0;
 
     const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, is_first, true);
     prompt_tokens.resize(n_prompt_tokens);
@@ -907,7 +907,7 @@ static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt
 // Check if we have enough space in the context to evaluate this batch
 static int check_context_size(const llama_context_ptr & ctx, const llama_batch & batch) {
     const int n_ctx      = llama_n_ctx(ctx.get());
-    const int n_ctx_used = llama_get_kv_cache_used_cells(ctx.get());
+    const int n_ctx_used = llama_kv_self_used_cells(ctx.get());
     if (n_ctx_used + batch.n_tokens > n_ctx) {
         printf(LOG_COL_DEFAULT "\n");
         printe("context size exceeded\n");
```
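For downstream callers, the visible change in these hunks is the rename of `llama_get_kv_cache_used_cells` to `llama_kv_self_used_cells`. A minimal sketch of the same capacity check using the renamed helper, assuming an already-initialized `llama_context * ctx` and a prepared `llama_batch batch` (the helper function name `batch_fits` is ours, for illustration only):

```cpp
// Capacity check as in the diff above, written against the renamed API.
#include "llama.h"
#include <cstdio>

static bool batch_fits(llama_context * ctx, const llama_batch & batch) {
    const int n_ctx      = llama_n_ctx(ctx);              // total context size
    const int n_ctx_used = llama_kv_self_used_cells(ctx); // KV cells already occupied
    if (n_ctx_used + batch.n_tokens > n_ctx) {
        fprintf(stderr, "context size exceeded\n");
        return false;
    }
    return true;
}
```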
Georgi Gerganov