	llama : do not crash if there is no CPU backend (#13395)
* llama : do not crash if there is no CPU backend
* add checks to examples
@@ -253,6 +253,9 @@ static void llama_adapter_lora_init_impl(llama_model & model, const char * path_
     std::vector<ggml_backend_buffer_type_t> buft_extra;
     {
         auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
+        if (!cpu_dev) {
+            throw std::runtime_error(format("%s: no CPU backend found", __func__));
+        }
         auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev);
 
         auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
@@ -291,6 +294,9 @@ static void llama_adapter_lora_init_impl(llama_model & model, const char * path_
                 LLAMA_LOG_WARN("%s: lora for '%s' cannot use buft '%s', fallback to CPU\n", __func__, model_tensor->name, ggml_backend_buft_name(buft));
 
                 auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
+                if (!cpu_dev) {
+                    throw std::runtime_error(format("%s: no CPU backend found", __func__));
+                }
                 buft = ggml_backend_dev_buffer_type(cpu_dev);
 
                 break;
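For readers applying the same guard elsewhere (the "add checks to examples" part of this commit), below is a minimal standalone sketch of the pattern. The main() harness and the error message are illustrative, not taken from the commit; the ggml-backend calls are the same API the patch guards above.

// Minimal sketch: probe for a CPU backend device before using it, instead
// of assuming ggml_backend_dev_by_type() never returns NULL. In a build
// where no CPU backend is registered, the unchecked pointer would crash.
#include <cstdio>

#include "ggml-backend.h"

int main() {
    ggml_backend_dev_t cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
    if (!cpu_dev) {
        // Fail gracefully, as the patch does, rather than dereferencing NULL.
        fprintf(stderr, "no CPU backend found\n");
        return 1;
    }
    printf("CPU backend device: %s\n", ggml_backend_dev_name(cpu_dev));
    return 0;
}

The library code in the diff throws a std::runtime_error instead of returning an error code because llama_adapter_lora_init_impl already reports failures via exceptions; an application-level check like the sketch can simply print and exit.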
Author: Diego Devesa