Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-03 09:22:01 +00:00)
* CUDA: add FP32 FlashAttention vector kernel
* fixup! CUDA: add FP32 FlashAttention vector kernel
* fixup! fixup! CUDA: add FP32 FlashAttention vector kernel
* fixup! fixup! fixup! CUDA: add FP32 FlashAttention vector kernel
		
			
				
	
	
		
4 lines | 114 B | Plaintext
		
	
	
	
	
	
			
		
		
	
	
#include "common.cuh"

// Entry point for the FP32 FlashAttention vector kernel added in this commit.
void ggml_cuda_flash_attn_ext_vec_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
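To show where this declaration fits, here is a minimal dispatch sketch, not the actual llama.cpp code: it assumes an FP16 counterpart ggml_cuda_flash_attn_ext_vec_f16 (declared in a sibling fattn-vec-f16.cuh) and the ggml_flash_attn_ext_get_prec helper for reading the requested accumulator precision; both are assumptions, not taken from this file.

// Hypothetical dispatch sketch (not the actual llama.cpp implementation): pick the
// FP32 vector kernel when the op requests full-precision accumulation, otherwise
// fall back to an assumed FP16 counterpart.
#include "common.cuh"
#include "fattn-vec-f16.cuh" // assumed sibling header declaring the FP16 variant
#include "fattn-vec-f32.cuh" // declares ggml_cuda_flash_attn_ext_vec_f32 (this file)

static void flash_attn_ext_vec_dispatch_sketch(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    // Assumption: ggml_flash_attn_ext_get_prec reports the precision requested for
    // the GGML_OP_FLASH_ATTN_EXT node (GGML_PREC_DEFAULT or GGML_PREC_F32).
    const ggml_prec prec = ggml_flash_attn_ext_get_prec(dst);

    if (prec == GGML_PREC_F32) {
        // Full FP32 accumulation: use the kernel declared in this header.
        ggml_cuda_flash_attn_ext_vec_f32(ctx, dst);
    } else {
        // Default path: FP16 vector kernel (assumed API, mirrors the FP32 signature).
        ggml_cuda_flash_attn_ext_vec_f16(ctx, dst);
    }
}

The real backend likely weighs additional factors (head size, batch size, GPU capability) when choosing a kernel, so treat this purely as an illustration of how the declared function is invoked.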