	llama : deprecate llama_kv_self_ API (#14030)
* llama : deprecate llama_kv_self_ API
* llama : allow llama_memory_(nullptr)
* memory : add flag for optional data clear in llama_memory_clear
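A minimal caller-side sketch of the migration this commit implies. The replacement entry points llama_get_memory() and llama_memory_clear() (and the exact spelling of the boolean flag) are assumptions taken from the commit message, not shown in the diff below:

// Hedged sketch: moving a caller off the deprecated llama_kv_self_ API.
// Assumes llama_get_memory()/llama_memory_clear() are the replacements
// introduced alongside this commit.
#include "llama.h"

static void reset_cache(llama_context * ctx) {
    // Before (deprecated):
    //     llama_kv_self_clear(ctx);

    // After: fetch the context's memory handle and clear it explicitly.
    llama_memory_t mem = llama_get_memory(ctx);

    // The new flag controls whether the backend data buffers are zeroed too
    // (true) or only the cell metadata is reset (false).
    llama_memory_clear(mem, /*data=*/true);
}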
@@ -129,13 +129,15 @@ llama_kv_cache_unified::llama_kv_cache_unified(
     }
 }
 
-void llama_kv_cache_unified::clear() {
+void llama_kv_cache_unified::clear(bool data) {
     cells.reset();
 
     head = 0;
 
-    for (auto & buf : bufs) {
-        ggml_backend_buffer_clear(buf.get(), 0);
+    if (data) {
+        for (auto & buf : bufs) {
+            ggml_backend_buffer_clear(buf.get(), 0);
+        }
     }
 }
 
@@ -1319,7 +1321,7 @@ void llama_kv_cache_unified::state_read(llama_io_read_i & io, llama_seq_id seq_i
 
     if (!res) {
         if (seq_id == -1) {
-            clear();
+            clear(true);
         } else {
             seq_rm(seq_id, -1, -1);
         }
@@ -1500,7 +1502,7 @@ bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell
             return false;
         }
 
-        clear();
+        clear(true);
 
         for (uint32_t i = 0; i < cell_count; ++i) {
            llama_pos pos;
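For reference, a hedged illustration of the two modes the new flag enables; the class and member names come from the hunks above, while the comments on when each mode is appropriate are assumptions:

// Sketch of the clear(bool data) semantics shown above: cell metadata
// (cells, head) is always reset, while the backend buffers are only
// zeroed when data == true.
void reset_example(llama_kv_cache_unified & kv) {
    kv.clear(/*data=*/false); // metadata-only reset; buffer contents left untouched
    kv.clear(/*data=*/true);  // full clear, as used on the state_read failure paths above
}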