Mirror of https://github.com/ggml-org/llama.cpp.git (last synced 2025-10-31 08:51:55 +00:00)
			
		
		
		
	minor : fix compile warnings
This commit is contained in:
		| @@ -44,15 +44,15 @@ struct gpt_params { | ||||
|     float   mirostat_tau      = 5.00f; // target entropy | ||||
|     float   mirostat_eta      = 0.10f; // learning rate | ||||
|  | ||||
|     std::string model  = "models/7B/ggml-model.bin"; // model path | ||||
|     std::string prompt = ""; | ||||
|     std::string model             = "models/7B/ggml-model.bin"; // model path | ||||
|     std::string prompt            = ""; | ||||
|     std::string path_prompt_cache = "";  // path to file for saving/loading prompt eval state | ||||
|     std::string input_prefix      = "";  // string to prefix user inputs with | ||||
|     std::string input_suffix      = "";  // string to suffix user inputs with | ||||
|     std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted | ||||
|  | ||||
|     std::string lora_adapter = "";  // lora adapter path | ||||
|     std::string lora_base = "";     // base model path for the lora adapter | ||||
|     std::string lora_base    = "";  // base model path for the lora adapter | ||||
|  | ||||
|     bool memory_f16        = true;  // use f16 instead of f32 for memory kv | ||||
|     bool random_prompt     = false; // do not randomize prompt if none provided | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Georgi Gerganov
					Georgi Gerganov