	FP16 is supported in CM=6.0 (#2177)
* FP16 is supported in CM=6.0
* Building PTX code for both 60 and 61

Co-authored-by: Johannes Gäßler <johannesg@5d6.de>
@@ -272,7 +272,7 @@ if (LLAMA_CUBLAS)

     if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
         if (LLAMA_CUDA_DMMV_F16)
-            set(CMAKE_CUDA_ARCHITECTURES "61") # needed for f16 CUDA intrinsics
+            set(CMAKE_CUDA_ARCHITECTURES "60;61") # needed for f16 CUDA intrinsics
         else()
             set(CMAKE_CUDA_ARCHITECTURES "52;61") # lowest CUDA 12 standard + lowest for integer intrinsics
         endif()
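Compute capability 6.0 (e.g. the Tesla P100) already exposes the half-precision CUDA intrinsics that the DMMV F16 path relies on, so restricting the build to architecture 61 was unnecessarily narrow. Plain entries such as "60;61" (no -real/-virtual suffix) make CMake emit both SASS and PTX for each listed architecture, which matches the "Building PTX code for both 60 and 61" note in the commit message. Below is a minimal sketch, not taken from llama.cpp, of the kind of arch-guarded FP16 code this change enables; the kernel and its names are illustrative only:

// Illustrative kernel (not from llama.cpp): element-wise FP16 add.
// The __CUDA_ARCH__ guard mirrors the commit's premise that native
// half arithmetic is usable from compute capability 6.0 upward.
#include <cuda_fp16.h>

__global__ void hadd_example(const half *a, const half *b, half *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
    c[i] = __hadd(a[i], b[i]);   // native FP16 add on sm_60 and newer
#else
    // FP32 fallback for older architectures
    c[i] = __float2half(__half2float(a[i]) + __half2float(b[i]));
#endif
}

With this change, configuring with -DLLAMA_CUBLAS=ON -DLLAMA_CUDA_DMMV_F16=ON produces native code for both sm_60 and sm_61 GPUs, plus PTX that newer GPUs can JIT-compile.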
Author: Howard Su