Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-30 08:42:00 +00:00)
	cuda : add ampere to the list of default architectures (#11870)
Author: Diego Devesa
@@ -15,9 +15,9 @@ if (CUDAToolkit_FOUND)
         if (GGML_NATIVE AND CUDAToolkit_VERSION VERSION_GREATER_EQUAL "11.6" AND CMAKE_VERSION VERSION_GREATER_EQUAL "3.24")
             set(CMAKE_CUDA_ARCHITECTURES "native")
         elseif(GGML_CUDA_F16 OR GGML_CUDA_DMMV_F16)
-            set(CMAKE_CUDA_ARCHITECTURES "60;61;70;75")
+            set(CMAKE_CUDA_ARCHITECTURES "60;61;70;75;80")
         else()
-            set(CMAKE_CUDA_ARCHITECTURES "52;61;70;75")
+            set(CMAKE_CUDA_ARCHITECTURES "52;61;70;75;80")
         endif()
     endif()
     message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")
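For context, CUDA architecture 80 corresponds to compute capability 8.0, i.e. Ampere GPUs such as the A100 and the RTX 30 series, so binaries built with the default list now also include Ampere device code. A minimal sketch of overriding these defaults at configure time (the -DGGML_CUDA=ON option is assumed from the llama.cpp build documentation; CMAKE_CUDA_ARCHITECTURES is a standard CMake cache variable, so setting it on the command line bypasses the default selection shown in the diff above):

# Hypothetical configure invocation: build device code only for Ampere (8.0)
# and Ada (8.9) instead of relying on the default architecture list.
cmake -B build -DGGML_CUDA=ON -DCMAKE_CUDA_ARCHITECTURES="80;89"
cmake --build build --config Release

The "Using CUDA architectures: ..." status message printed during configuration can be used to confirm which list was actually picked up.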