Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-31 08:51:55 +00:00
			
		
		
		
	common, server : surface min_keep as its own parameter (#5567)
* Feature: surface min_keep as its own parameter
* Updated README with the min_keep parameter
This commit is contained in:
		| @@ -22,6 +22,7 @@ enum class llama_sampler_type : char { | ||||
| typedef struct llama_sampling_params { | ||||
|     int32_t     n_prev                = 64;       // number of previous tokens to remember | ||||
|     int32_t     n_probs               = 0;        // if greater than 0, output the probabilities of top n_probs tokens. | ||||
|     int32_t     min_keep              = 0;        // 0 = disabled, otherwise samplers should return at least min_keep tokens | ||||
|     int32_t     top_k                 = 40;       // <= 0 to use vocab size | ||||
|     float       top_p                 = 0.95f;    // 1.0 = disabled | ||||
|     float       min_p                 = 0.05f;    // 0.0 = disabled | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Robey Holderith
					Robey Holderith