	llama : update llama_decode_internal ref [no ci] (#11840)
This commit updates the comment in llama_kv_cache.h to reflect the renaming of llama_decode_internal to llama_decode_impl.
@@ -37,7 +37,7 @@ struct llama_kv_cache {
     bool can_shift = false;

     // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_internal also uses it, so it
+    // for a free KV slot. llama_decode_impl also uses it, so it
     // cannot be freely changed after a slot has been allocated.
     uint32_t head = 0;
     uint32_t size = 0;
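To make the comment concrete, below is a minimal C++ sketch of why head cannot be freely changed: allocation uses it as a search hint, and the decode path reads it afterwards. The head and size fields mirror those in the diff; the occupied bitmap, find_free_slot(), and the constructor are hypothetical illustrations, not llama.cpp's actual implementation.

#include <cstdint>
#include <cstdio>
#include <vector>

// Minimal sketch of a KV cache whose `head` field is a search hint,
// in the spirit of the comment from llama_kv_cache.h above. The
// `occupied` bitmap and find_free_slot() are hypothetical helpers,
// not llama.cpp's real code.
struct kv_cache_sketch {
    uint32_t head = 0;          // where the next slot search starts
    uint32_t size = 0;          // total number of cells
    std::vector<bool> occupied; // per-cell usage flags (hypothetical)

    explicit kv_cache_sketch(uint32_t n) : size(n), occupied(n, false) {}

    // Scan from `head` for a contiguous run of n_tokens free cells.
    // Returns the first cell index, or -1 if none exists. Note that
    // `head` is updated as a side effect: the decode path reads it
    // afterwards (as the comment says of llama_decode_impl), so it
    // must not be reset arbitrarily once a slot has been allocated.
    int64_t find_free_slot(uint32_t n_tokens) {
        for (uint32_t tried = 0; tried < size; ++tried) {
            const uint32_t start = (head + tried) % size;
            if (start + n_tokens > size) continue; // run must not wrap
            bool free_run = true;
            for (uint32_t i = 0; i < n_tokens; ++i) {
                if (occupied[start + i]) { free_run = false; break; }
            }
            if (free_run) {
                head = start; // remember where the slot was placed
                return start;
            }
        }
        return -1;
    }
};

int main() {
    kv_cache_sketch cache(8);
    cache.occupied[0] = true; // pretend cell 0 is already in use
    std::printf("slot: %lld, head: %u\n",
                (long long) cache.find_free_slot(3), cache.head);
    return 0;
}

Starting the scan at head rather than cell 0 keeps allocation cheap when the cache fills front to back, which is why a caller that silently rewrote head would both slow the search and desynchronize the decode path that depends on it.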
Daniel Bevenius