mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-04 09:32:00 +00:00)
@@ -1776,7 +1776,7 @@ bool llama_kv_cache_unified_context::next() {
 }
 
 bool llama_kv_cache_unified_context::apply() {
-    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+    assert(!llama_memory_status_is_fail(status));
 
     // no ubatches -> this is a KV cache update
     if (ubatches.empty()) {
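
The new assert checks a status-classification helper instead of exact equality with LLAMA_MEMORY_STATUS_SUCCESS, so apply() now also tolerates non-failure statuses (e.g. a no-op update) rather than only SUCCESS. A minimal sketch of how such a helper could look; the enum values other than LLAMA_MEMORY_STATUS_SUCCESS are assumptions about the llama.cpp memory-status API, not taken from this diff:

// Sketch only: status names besides LLAMA_MEMORY_STATUS_SUCCESS are assumed.
enum llama_memory_status {
    LLAMA_MEMORY_STATUS_SUCCESS,
    LLAMA_MEMORY_STATUS_NO_UPDATE,
    LLAMA_MEMORY_STATUS_FAILED_PREPARE,
    LLAMA_MEMORY_STATUS_FAILED_COMPUTE,
};

// Returns true only for the FAILED_* states; with the new assert,
// apply() accepts both SUCCESS and a non-failure status such as NO_UPDATE.
static bool llama_memory_status_is_fail(llama_memory_status status) {
    switch (status) {
        case LLAMA_MEMORY_STATUS_SUCCESS:
        case LLAMA_MEMORY_STATUS_NO_UPDATE:
            return false;
        case LLAMA_MEMORY_STATUS_FAILED_PREPARE:
        case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
            return true;
    }
    return false;
}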