// Source: mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-21)
// This file has been autogenerated by generate_cu_files.py, do not edit manually.

#include "../fattn-mma-f16.cuh"

// Explicit instantiations of the FlashAttention MMA (tensor-core) F16 kernel,
// one per supported head size. Splitting instantiations across autogenerated
// .cu files like this one keeps per-TU compile time and memory bounded.
//
// NOTE(review): argument meaning is defined by DECL_FATTN_MMA_F16_CASE in
// fattn-mma-f16.cuh — the first two values appear to be the K/Q and V head
// sizes and the last two column-tiling parameters; confirm against the macro.
DECL_FATTN_MMA_F16_CASE(64, 64, 8, 8);
DECL_FATTN_MMA_F16_CASE(80, 80, 8, 8);
DECL_FATTN_MMA_F16_CASE(96, 96, 8, 8);
DECL_FATTN_MMA_F16_CASE(112, 112, 8, 8);
DECL_FATTN_MMA_F16_CASE(128, 128, 8, 8);
DECL_FATTN_MMA_F16_CASE(256, 256, 8, 8);