mirror of https://github.com/ggml-org/llama.cpp.git
	limit to GGML_ALLOW_CUDA_GRAPHS defined in llama.cpp cmake
CMakeLists.txt
@@ -413,6 +413,7 @@ if (LLAMA_CUDA)
         list(APPEND GGML_SOURCES_CUDA "ggml-cuda.cu")
 
         add_compile_definitions(GGML_USE_CUDA)
+        add_compile_definitions(GGML_ALLOW_CUDA_GRAPHS)
         if (LLAMA_CUDA_FORCE_DMMV)
             add_compile_definitions(GGML_CUDA_FORCE_DMMV)
         endif()
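With LLAMA_CUDA enabled, the CMake change above defines GGML_ALLOW_CUDA_GRAPHS for every translation unit in the build. As a minimal sketch (not part of this commit; the file name and diagnostic text are illustrative only), any source file can report at compile time whether the define actually reached the compiler:

    // check_flag.cu (hypothetical): emits a diagnostic while compiling
    #ifdef GGML_ALLOW_CUDA_GRAPHS
    #pragma message("GGML_ALLOW_CUDA_GRAPHS is defined: CUDA graphs may be enabled")
    #else
    #pragma message("GGML_ALLOW_CUDA_GRAPHS is not defined: CUDA graphs stay disabled")
    #endif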
ggml-cuda.cu
@@ -2405,7 +2405,7 @@ GGML_CALL static void ggml_backend_cuda_synchronize(ggml_backend_t backend) {
     GGML_UNUSED(backend);
 }
 
-#if (CUDART_VERSION >= 12000)
+#if (CUDART_VERSION >= 12000) && defined(GGML_ALLOW_CUDA_GRAPHS)
 #define USE_CUDA_GRAPH
 #endif
 
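USE_CUDA_GRAPH is the macro that gates the CUDA-graph execution path, so after this commit that path is compiled only when both conditions hold: the CUDA runtime headers are 12.0 or newer and the build defined GGML_ALLOW_CUDA_GRAPHS. The following is a minimal, self-contained sketch of the capture-and-replay pattern such a guard typically switches on; it is not llama.cpp's implementation, and scale_kernel is a hypothetical kernel used only for illustration (assumes CUDA 12+):

    // cuda_graph_sketch.cu (hypothetical). Build e.g.:
    //   nvcc -DUSE_CUDA_GRAPH cuda_graph_sketch.cu -o cuda_graph_sketch
    #include <cuda_runtime.h>
    #include <cstdio>

    // Hypothetical kernel, used only to have some work to capture.
    __global__ void scale_kernel(float *x, int n) {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n) x[i] *= 2.0f;
    }

    int main() {
        const int n = 1024;
        float *x = nullptr;
        cudaMalloc(&x, n * sizeof(float));
        cudaStream_t stream;
        cudaStreamCreate(&stream);

    #ifdef USE_CUDA_GRAPH
        // Graph path: capture the stream work once, then replay the executable graph.
        cudaGraph_t graph;
        cudaGraphExec_t graph_exec;
        cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
        scale_kernel<<<(n + 255) / 256, 256, 0, stream>>>(x, n);
        cudaStreamEndCapture(stream, &graph);
        cudaGraphInstantiate(&graph_exec, graph, 0); // CUDA 12 flags overload
        cudaGraphLaunch(graph_exec, stream);
        cudaGraphExecDestroy(graph_exec);
        cudaGraphDestroy(graph);
    #else
        // Fallback path when USE_CUDA_GRAPH is not defined: plain per-call launch.
        scale_kernel<<<(n + 255) / 256, 256, 0, stream>>>(x, n);
    #endif

        cudaStreamSynchronize(stream);
        printf("done\n");
        cudaStreamDestroy(stream);
        cudaFree(x);
        return 0;
    }

Compiling with -DUSE_CUDA_GRAPH exercises the graph branch; omitting the define falls back to the plain stream launch, which mirrors how the #if above turns the feature off when GGML_ALLOW_CUDA_GRAPHS is absent from the build.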