Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-10-31 08:51:55 +00:00.
			
		
		
		
	cuda : print the returned error when CUDA initialization fails (#6185)
This commit is contained in:
@@ -294,8 +294,9 @@ static ggml_cuda_device_info ggml_cuda_init() {

     ggml_cuda_device_info info = {};

-    if (cudaGetDeviceCount(&info.device_count) != cudaSuccess) {
-        fprintf(stderr, "%s: no " GGML_CUDA_NAME " devices found, " GGML_CUDA_NAME " will be disabled\n", __func__);
+    cudaError_t err = cudaGetDeviceCount(&info.device_count);
+    if (err != cudaSuccess) {
+        fprintf(stderr, "%s: failed to initialize " GGML_CUDA_NAME ": %s\n", __func__, cudaGetErrorString(err));
         return info;
     }
		Reference in New Issue
	
	Block a user
	 slaren
					slaren