	kv-cache : allow context shift for recurrent models
@@ -1938,7 +1938,8 @@ llama_pos llama_kv_cache_recurrent::get_pos_max() const {
 }
 
 bool llama_kv_cache_recurrent::get_can_shift() const {
-    return false;
+    // shifting is trivial, the recurrent states don't care about the absolute position
+    return true;
 }
 
 uint32_t llama_kv_cache_recurrent::cell_max() const {
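The rationale in the added comment is that a recurrent state (Mamba/RWKV-style) is a running summary of the tokens seen so far and does not encode absolute token positions, so a context shift only needs to update position bookkeeping. The sketch below is a minimal, hypothetical illustration of that idea; the recurrent_cell struct and seq_add function are illustrative only and are not the actual llama.cpp data structures or API.

// Hypothetical, simplified sketch (not the llama.cpp implementation):
// a recurrent cache cell stores an opaque state plus a position. Shifting a
// range of positions only rewrites the bookkeeping `pos` field; the state
// itself is untouched, because it does not encode absolute positions.
#include <cstdint>
#include <cstdio>
#include <vector>

using llama_pos = int32_t;

struct recurrent_cell {
    llama_pos          pos;    // bookkeeping only
    std::vector<float> state;  // recurrent state, position-independent
};

// Add `delta` to the position of every cell whose position lies in [p0, p1).
// No state data is modified, which is why a shift is "trivial" here.
static void seq_add(std::vector<recurrent_cell> & cells,
                    llama_pos p0, llama_pos p1, llama_pos delta) {
    for (auto & cell : cells) {
        if (cell.pos >= p0 && cell.pos < p1) {
            cell.pos += delta;
        }
    }
}

int main() {
    std::vector<recurrent_cell> cells = {
        {10, {0.1f, 0.2f}},
        {11, {0.3f, 0.4f}},
    };

    seq_add(cells, 0, 100, -5); // shift the whole window back by 5 positions

    for (const auto & cell : cells) {
        std::printf("pos = %d\n", cell.pos); // prints 5, then 6; states untouched
    }
    return 0;
}

For comparison, a regular attention KV cache cannot treat a shift as pure bookkeeping when positional encoding (e.g. RoPE) is baked into the cached keys, which is why get_can_shift() is a per-cache-type decision in the first place.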