	common : Changed tuple to struct (TODO fix) (#8823)
* common : Changed tuple to struct (TODO fix)

  Use struct `llama_init_result` to replace the previous std::tuple<struct llama_model *, struct llama_context *>

* delete llama_init_default_params()

* delete the extra whitespace
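For context, a minimal sketch of what the new return type plausibly looks like. The field names `model` and `context` are taken from the usage in the diff below; the exact definition, defaults, and header location are assumptions, not quoted from this commit.

// Sketch (assumption): the aggregate returned by llama_init_from_gpt_params()
// after this change, replacing the old std::tuple return value.
struct llama_init_result {
    struct llama_model   * model   = nullptr;
    struct llama_context * context = nullptr;
};

// Old signature (replaced by this commit):
//   std::tuple<struct llama_model *, struct llama_context *>
//       llama_init_from_gpt_params(gpt_params & params);
//
// New signature (assumed, consistent with the diff below):
//   struct llama_init_result llama_init_from_gpt_params(gpt_params & params);

Callers that previously unpacked the tuple with std::tie now read the two pointers directly from the returned struct, as the diff shows.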
@@ -163,9 +163,10 @@ int main(int argc, char ** argv) {
     params.warmup = false;

     // init
-    llama_model * model;
-    llama_context * ctx;
-    std::tie(model, ctx) = llama_init_from_gpt_params(params);
+    llama_init_result llama_init = llama_init_from_gpt_params(params);
+
+    llama_model * model = llama_init.model;
+    llama_context * ctx = llama_init.context;
     if (model == nullptr || ctx == nullptr) {
         fprintf(stderr, "%s : failed to init\n", __func__);
         return 1;
Liu Jia