Mirror of https://github.com/ggml-org/llama.cpp.git (last synced 2025-10-30 08:42:00 +00:00).
			
		
		
		
Commit: Default to 4 threads (#243)
This commit is contained in the following branches/tags:
		
							
								
								
									
										4
									
								
								utils.h
									
									
									
									
									
								
							
							
						
						
									
										4
									
								
								utils.h
									
									
									
									
									
								
							| @@ -14,11 +14,11 @@ | |||||||
|  |  | ||||||
| struct gpt_params { | struct gpt_params { | ||||||
|     int32_t seed      = -1; // RNG seed |     int32_t seed      = -1; // RNG seed | ||||||
|     int32_t n_threads; |     int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency()); | ||||||
|     int32_t n_predict = 128; // new tokens to predict |     int32_t n_predict = 128; // new tokens to predict | ||||||
|     int32_t repeat_last_n = 64;  // last n tokens to penalize |     int32_t repeat_last_n = 64;  // last n tokens to penalize | ||||||
|     int32_t n_ctx = 512; //context size |     int32_t n_ctx = 512; //context size | ||||||
|      |  | ||||||
|     // sampling parameters |     // sampling parameters | ||||||
|     int32_t top_k = 40; |     int32_t top_k = 40; | ||||||
|     float   top_p = 0.95f; |     float   top_p = 0.95f; | ||||||
|   | |||||||
		Reference in New Issue
	
	Block a user
	 Georgi Gerganov
					Georgi Gerganov