Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-30 08:42:00 +00:00)
			
		
		
		
	server: fix incorrectly reported token probabilities (#7125)
* server: normalize token probabilities
* fix temperature == 0.0f
This commit is contained in:
		| @@ -81,6 +81,7 @@ struct llama_sampling_context { | ||||
|     // TODO: replace with ring-buffer | ||||
|     std::vector<llama_token>      prev; | ||||
|     std::vector<llama_token_data> cur; | ||||
|     size_t n_considered; | ||||
|  | ||||
|     std::mt19937 rng; | ||||
| }; | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Johannes Gäßler
					Johannes Gäßler