Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-31 08:51:55 +00:00
			
		
		
		
	android : fix KV cache log message condition (#12212)
This commit is contained in:
		| @@ -361,7 +361,7 @@ Java_android_llama_cpp_LLamaAndroid_completion_1init( | ||||
|     const auto tokens_list = common_tokenize(context, text, true, parse_special); | ||||
|  | ||||
|     auto n_ctx = llama_n_ctx(context); | ||||
|     auto n_kv_req = tokens_list.size() + (n_len - tokens_list.size()); | ||||
|     auto n_kv_req = tokens_list.size() + n_len; | ||||
|  | ||||
|     LOGi("n_len = %d, n_ctx = %d, n_kv_req = %d", n_len, n_ctx, n_kv_req); | ||||
|  | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
Author: Han Yin