Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-30 08:42:00 +00:00
	llama : document logits_all deprecation (#4418)
llama_context_params.logits_all is a parameter for controlling llama_eval. This documents that logits_all should not be used with llama_decode and llama_batch.
This commit is contained in:
		
							
								
								
									
Changed files: llama.h (2 changed lines: 1 addition, 1 deletion)
@@ -216,7 +216,7 @@ extern "C" {

         // Keep the booleans together to avoid misalignment during copy-by-value.
         bool mul_mat_q;   // if true, use experimental mul_mat_q kernels (DEPRECATED - always true)
-        bool logits_all;  // the llama_eval() call computes all logits, not just the last one
+        bool logits_all;  // the llama_eval() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
         bool embedding;   // embedding mode only
         bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
     };
Reference in New Issue · Block a user
Author: crasm