	Init llama_context_params properly from CLI (#370)
llama.cpp
@@ -1398,6 +1398,10 @@ struct llama_context * llama_init_from_file(
 
     llama_context * ctx = new llama_context;
 
+    if (params.seed <= 0) {
+        params.seed = time(NULL);
+    }
+
     ctx->rng = std::mt19937(params.seed);
     ctx->logits_all = params.logits_all;
 
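The added check treats a non-positive seed as "use a fresh seed": it is replaced with time(NULL) before the std::mt19937 sampling RNG is constructed, while any positive seed is kept so a run can be reproduced. A minimal standalone sketch of the same pattern (plain C++, independent of llama.cpp; pick_seed is a hypothetical helper name, not part of the commit):

#include <cstdio>
#include <ctime>
#include <random>

// Hypothetical helper mirroring the commit's logic: a seed <= 0 means
// "not set" and is replaced with the current time; a positive seed is
// kept unchanged so the run stays reproducible.
static int pick_seed(int requested) {
    return requested <= 0 ? (int) std::time(nullptr) : requested;
}

int main() {
    const int seed = pick_seed(-1);   // -1 -> time-based seed
    std::mt19937 rng(seed);           // same engine type the llama_context uses
    std::printf("seed = %d, first draw = %u\n", seed, (unsigned) rng());
    return 0;
}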
main.cpp (5 changed lines)
@@ -194,7 +194,10 @@ int main(int argc, char ** argv) {
     {
         auto lparams = llama_context_default_params();
 
-        lparams.f16_kv = params.memory_f16;
+        lparams.n_ctx      = params.n_ctx;
+        lparams.n_parts    = params.n_parts;
+        lparams.seed       = params.seed;
+        lparams.f16_kv     = params.memory_f16;
         lparams.logits_all = params.perplexity;
 
         ctx = llama_init_from_file(params.model.c_str(), lparams);
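With both hunks applied, the values parsed from the command line actually reach the context: main.cpp copies them into llama_context_params, and llama_init_from_file resolves a non-positive seed. A hedged sketch of that initialization path against the llama.h API of this commit; the literal values below (model path, 512, -1, and so on) are illustrative stand-ins for the parsed gpt_params fields, not values taken from the commit:

#include <cstdio>
#include "llama.h"

int main(int argc, char ** argv) {
    const char * model_path = argc > 1 ? argv[1] : "models/7B/ggml-model-q4_0.bin";

    // Start from the library defaults, then override the fields this
    // commit wires up from the CLI.
    auto lparams = llama_context_default_params();
    lparams.n_ctx      = 512;   // context size (illustrative)
    lparams.n_parts    = -1;    // illustrative: let the loader decide
    lparams.seed       = -1;    // <= 0, so llama_init_from_file falls back to time(NULL)
    lparams.f16_kv     = true;  // keep the KV cache in F16
    lparams.logits_all = false; // true only for perplexity runs

    llama_context * ctx = llama_init_from_file(model_path, lparams);
    if (ctx == NULL) {
        std::fprintf(stderr, "failed to load model from '%s'\n", model_path);
        return 1;
    }

    // ... tokenize, evaluate and sample here ...

    llama_free(ctx);
    return 0;
}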
Author: Georgi Gerganov