	Fix bool return in llama_model_load, remove std::ignore use
Author: crasm
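This commit makes two small corrections. First, llama_model_load returns an int status, now documented as 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback, but its vocab-only early exit returned true, which converts to 1. Second, the model-load cancellation test silenced its unused callback parameter with std::ignore, which requires <tuple>; a plain (void) cast does the same job without the include. The test's hard-coded model path also changes from ../models/7B/ggml-model-f16.gguf to models/7B/ggml-model-f16.gguf. Because the test's callback returns false while progress is at or below 0.5, the load is expected to be cancelled, so the test succeeds only when the returned model is nullptr.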
--- a/llama.cpp
+++ b/llama.cpp
@@ -3700,7 +3700,7 @@ static bool llm_load_tensors(
     return ok;
 }
 
-// Returns -1 on error, -2 on cancellation via llama_progress_callback
+// Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
 static int llama_model_load(const std::string & fname, llama_model & model, const llama_model_params & params) {
     try {
         llama_model_loader ml(fname, params.use_mmap, params.kv_overrides);
@@ -3719,7 +3719,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, cons
 
         if (params.vocab_only) {
             LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
-            return true;
+            return 0;
         }
 
         if (!llm_load_tensors(
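The vocab-only early return is the substantive fix: in an int-returning function documented to yield 0 on success, return true; silently converts to 1, which a caller that treats nonzero as failure will misread. A minimal standalone sketch of this bug class, with illustrative names that are not part of llama.cpp:

#include <cstdio>

// Illustrative stand-in for an int-returning loader whose documented
// convention is 0 = success, -1 = error, -2 = cancelled.
static int load_model_status(bool vocab_only) {
    if (vocab_only) {
        return true; // BUG: true converts to int 1, which is not the success code
    }
    return 0; // success
}

int main(void) {
    const int status = load_model_status(true);
    // Prints "status = 1": a caller checking status != 0 sees a failure even
    // though the vocab-only path succeeded. The fix is to return 0 instead.
    std::printf("status = %d (0 means success)\n", status);
    return 0;
}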
--- a/tests/test-model-load-cancel.cpp
+++ b/tests/test-model-load-cancel.cpp
@@ -1,17 +1,16 @@
 #include "llama.h"
 
 #include <cstdlib>
-#include <tuple>
 
 int main(void) {
     llama_backend_init(false);
     auto params = llama_model_params{};
     params.use_mmap = false;
     params.progress_callback = [](float progress, void * ctx){
-        std::ignore = ctx;
+        (void) ctx;
         return progress > 0.50;
     };
-    auto * model = llama_load_model_from_file("../models/7B/ggml-model-f16.gguf", params);
+    auto * model = llama_load_model_from_file("models/7B/ggml-model-f16.gguf", params);
     llama_backend_free();
     return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
 }
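The other change replaces std::ignore = ctx; with (void) ctx;, which is why #include <tuple> can be dropped: std::ignore lives in <tuple> and is specified for use with std::tie, whereas the cast-to-void is the conventional idiom for marking a parameter deliberately unused. A small standalone sketch of the equivalent idioms, not llama.cpp code:

#include <cstdio>

int main(void) {
    // Conventional idiom: cast the unused parameter to void; needs no header.
    auto cb_cast = [](float progress, void * ctx) {
        (void) ctx;
        return progress > 0.50f;
    };
    // C++17 alternative: mark the parameter [[maybe_unused]].
    auto cb_attr = [](float progress, [[maybe_unused]] void * ctx) {
        return progress > 0.50f;
    };
    std::printf("%d %d\n", cb_cast(0.75f, nullptr), cb_attr(0.25f, nullptr)); // prints "1 0"
    return 0;
}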