	llama : fix handling of "future" tokens when loading sessions
@@ -448,7 +448,7 @@ struct llama_server_context
         n_past = common_part(embd, prompt_tokens);

         // since #3228 we now have to manually manage the KV cache
-        llama_kv_cache_seq_rm(ctx, 0, n_past, params.n_ctx);
+        llama_kv_cache_seq_rm(ctx, 0, n_past, -1);

         embd = prompt_tokens;
         if (n_past == num_prompt_tokens)
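Below is a minimal sketch (not the actual server code) of the prompt-reuse pattern this diff touches, assuming the llama.cpp C API of that era. The helper common_part is reproduced here for illustration; the key point is that passing -1 as the final position to llama_kv_cache_seq_rm clears the cache from n_past to the end of the sequence, so stale "future" tokens left over from a longer previous prompt or a restored session cannot leak into the next evaluation.

#include <vector>
#include "llama.h"

// length of the shared token prefix between the cached tokens and the new prompt
static size_t common_part(const std::vector<llama_token> & a, const std::vector<llama_token> & b) {
    size_t i = 0;
    while (i < a.size() && i < b.size() && a[i] == b[i]) {
        i++;
    }
    return i;
}

static void reuse_cached_prefix(llama_context * ctx,
                                std::vector<llama_token> & embd,            // tokens currently in the KV cache
                                const std::vector<llama_token> & prompt_tokens) {
    // keep only the prefix that matches the new prompt
    const size_t n_past = common_part(embd, prompt_tokens);

    // drop every cached token from position n_past onwards in sequence 0;
    // p1 = -1 means "up to the end of the sequence"
    llama_kv_cache_seq_rm(ctx, 0, (llama_pos) n_past, -1);

    embd = prompt_tokens;
    // evaluation then resumes from n_past with the new prompt tokens
}

Using a fixed upper bound such as params.n_ctx would only remove entries up to the context size of the current run, whereas -1 unconditionally covers everything past n_past, which is what the fix relies on.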
Georgi Gerganov