Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-21 12:16:57 +00:00)
CUDA: refactor FA support/selection code (#15454)
@@ -1,3 +1,5 @@
 #include "common.cuh"
 
 void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+
+bool ggml_cuda_flash_attn_ext_supported(int device, const ggml_tensor * dst);
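The added predicate gives callers a single place to ask whether the flash-attention kernels can run for a given op on a given device, instead of duplicating per-device capability checks at each call site. Below is a minimal sketch of how a caller might combine the two declarations from the hunk above; the wrapper function launch_flash_attn_if_supported and the fallback comment are illustrative assumptions, not code from the commit, and it assumes the surrounding ggml-cuda headers (ggml_backend_cuda_context with an int device member, as in llama.cpp's CUDA backend).

#include "common.cuh" // ggml-cuda common header, as in the diff above

// Declarations from the hunk above.
void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
bool ggml_cuda_flash_attn_ext_supported(int device, const ggml_tensor * dst);

// Hypothetical caller (not part of the commit): gate kernel dispatch on the
// per-device support check so selection logic stays centralized.
static void launch_flash_attn_if_supported(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    if (ggml_cuda_flash_attn_ext_supported(ctx.device, dst)) {
        ggml_cuda_flash_attn_ext(ctx, dst); // run the FA CUDA kernel
    }
    // Otherwise a real caller would fall back to the non-FA attention path.
}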