mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-03 09:22:01 +00:00)
			
		
		
		
* CUDA: add FP32 FlashAttention vector kernel
* fixup! CUDA: add FP32 FlashAttention vector kernel
* fixup! fixup! CUDA: add FP32 FlashAttention vector kernel
* fixup! fixup! fixup! CUDA: add FP32 FlashAttention vector kernel
6 lines · 213 B · Plaintext
#include "common.cuh"

// F16 FlashAttention vector kernel entry point.
void ggml_cuda_flash_attn_ext_vec_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

// Variant that does not use MMA (tensor core) instructions.
void ggml_cuda_flash_attn_ext_vec_f16_no_mma(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
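For context, a minimal dispatch sketch, not taken from llama.cpp: the helper name flash_attn_vec_f16_dispatch, the include path "fattn-vec-f16.cuh", and the compute-capability threshold are assumptions for illustration; only the two declarations above and the standard CUDA runtime calls are known to exist.

#include <cuda_runtime.h>
#include "fattn-vec-f16.cuh" // assumed name of the header shown above

// Hypothetical helper: use the MMA-capable entry point on Volta (cc 7.0) or
// newer devices, otherwise fall back to the no-MMA variant.
static void flash_attn_vec_f16_dispatch(ggml_backend_cuda_context & ctx, ggml_tensor * dst, int device) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    if (prop.major >= 7) {
        ggml_cuda_flash_attn_ext_vec_f16(ctx, dst);        // tensor cores available
    } else {
        ggml_cuda_flash_attn_ext_vec_f16_no_mma(ctx, dst); // no MMA support
    }
}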