Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-31 08:51:55 +00:00)
			
		
		
		
cuda : print message when initialization fails (#5512)

* cuda : print message when initialization fails
* use GGML_CUDA_NAME both times
This commit is contained in:
@@ -7943,6 +7943,7 @@ GGML_CALL void ggml_init_cublas() {
         if (cudaGetDeviceCount(&g_device_count) != cudaSuccess) {
             initialized = true;
             g_cublas_loaded = false;
+            fprintf(stderr, "%s: no " GGML_CUDA_NAME " devices found, " GGML_CUDA_NAME " will be disabled\n", __func__);
             return;
         }
Reference in New Issue
Block a user
Author: slaren