Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-31 08:51:55 +00:00)
		
		
	CUDA: check if event is NULL before cudaStreamWaitEvent (#2505)
Fixes #2503
This commit is contained in:
@@ -5203,7 +5203,7 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
     if (split && g_device_count > 1) {
         CUDA_CHECK(cudaSetDevice(g_main_device));
         for (int id = 0; id < g_device_count; ++id) {
-            if (id != g_main_device) {
+            if (id != g_main_device && src0_extra->events[id]) {
                 CUDA_CHECK(cudaStreamWaitEvent(g_cudaStreams_main[g_main_device], src0_extra->events[id]));
             }
         }
		Reference in New Issue
	
	Block a user
	 Cebtenzzre
					Cebtenzzre