mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-31 08:51:55 +00:00)
	Final touches
Changed files:

    utils.h (6 changed lines: 3 additions, 3 deletions)
@@ -15,12 +15,12 @@
 struct gpt_params {
     int32_t seed      = -1; // RNG seed
     int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
-    int32_t n_predict = 200; // new tokens to predict
+    int32_t n_predict = 128; // new tokens to predict
 
     // sampling parameters
-    int32_t top_k = 100;
+    int32_t top_k = 40;
     float   top_p = 0.95f;
-    float   temp  = 0.8f;
+    float   temp  = 0.80f;
 
     int32_t n_batch = 8; // batch size for prompt processing
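For context, here is a minimal, self-contained C++ sketch (not part of the commit) showing how these defaults behave: the struct body mirrors the fields in the diff above, while the main() driver and its per-run override are purely illustrative assumptions.

// Sketch only: the struct reflects the defaults introduced by this commit;
// main() is an illustrative driver, not code from the repository.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <thread>

struct gpt_params {
    int32_t seed      = -1; // RNG seed
    int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
    int32_t n_predict = 128; // new tokens to predict

    // sampling parameters
    int32_t top_k = 40;
    float   top_p = 0.95f;
    float   temp  = 0.80f;

    int32_t n_batch = 8; // batch size for prompt processing
};

int main() {
    gpt_params params;   // picks up the new defaults (n_predict = 128, top_k = 40)
    params.temp = 0.7f;  // callers can still override any field per run

    std::printf("n_predict = %d, top_k = %d, top_p = %.2f, temp = %.2f\n",
                params.n_predict, params.top_k, params.top_p, params.temp);
    return 0;
}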
Author: Georgi Gerganov