Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-28 08:31:25 +00:00.
			
		
		
		
	common : Changed tuple to struct (TODO fix) (#8823)
* common : Changed tuple to struct (TODO fix). Use struct `llama_init_result` to replace the previous `std::tuple<struct llama_model *, struct llama_context *>`.
* Delete `llama_init_default_params()`.
* Delete the extra whitespace.
This commit is contained in:
		| @@ -148,11 +148,12 @@ int main(int argc, char ** argv) { | ||||
|     llama_backend_init(); | ||||
|     llama_numa_init(params.numa); | ||||
|  | ||||
|     llama_model * model; | ||||
|     llama_context * ctx; | ||||
|  | ||||
|     // load the model | ||||
|     std::tie(model, ctx) = llama_init_from_gpt_params(params); | ||||
|     llama_init_result llama_init = llama_init_from_gpt_params(params); | ||||
|  | ||||
|     llama_model * model = llama_init.model; | ||||
|     llama_context * ctx = llama_init.context; | ||||
|  | ||||
|     if (model == NULL) { | ||||
|         fprintf(stderr, "%s: error: unable to load model\n", __func__); | ||||
|         return 1; | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Liu Jia
					Liu Jia