Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-17 11:37:10 +00:00)
CUDA: add conv_2d_transpose (#14287)
* CUDA: add conv_2d_transpose
* remove direct include of cuda_fp16
* Review: add brackets for readability, remove ggml_set_param and add asserts
ggml/src/ggml-cuda/conv2d-transpose.cuh (new file, 4 additions)
@@ -0,0 +1,4 @@
+#include "common.cuh"
+
+#define CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE 256
+void ggml_cuda_conv_2d_transpose_p0(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
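The diff above only adds the header: it declares the entry point ggml_cuda_conv_2d_transpose_p0 and the launch block size CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE, while the kernel itself lives in conv2d-transpose.cu, which is not shown here. For context, below is a minimal, self-contained sketch of what a naive transposed 2D convolution kernel launched with CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE threads per block could look like. The kernel name conv2d_transpose_naive, the single-channel float layout, and the host code are illustrative assumptions, not the upstream implementation.

// Illustrative sketch only -- NOT the upstream llama.cpp kernel.
// One thread per output element of a single-channel transposed 2D
// convolution with stride `st` and no padding.
// Output size: out_w = (in_w - 1) * st + kw, and likewise for out_h.
#include <cuda_runtime.h>
#include <cstdio>

#define CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE 256

__global__ void conv2d_transpose_naive(const float * input, const float * kernel,
                                       float * output,
                                       int in_w, int in_h, int kw, int kh,
                                       int out_w, int out_h, int st) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= out_w * out_h) {
        return;
    }
    const int ox = idx % out_w;
    const int oy = idx / out_w;

    float acc = 0.0f;
    // Gather every (input, kernel) pair that would scatter into this output pixel.
    for (int ky = 0; ky < kh; ++ky) {
        for (int kx = 0; kx < kw; ++kx) {
            const int sx = ox - kx;
            const int sy = oy - ky;
            if (sx % st != 0 || sy % st != 0) {
                continue; // this kernel tap does not align with an input sample
            }
            const int ix = sx / st;
            const int iy = sy / st;
            if (ix < 0 || ix >= in_w || iy < 0 || iy >= in_h) {
                continue; // outside the input
            }
            acc += input[iy * in_w + ix] * kernel[ky * kw + kx];
        }
    }
    output[oy * out_w + ox] = acc;
}

int main() {
    // Tiny example: 3x3 input, 2x2 kernel, stride 2 -> 6x6 output.
    const int in_w = 3, in_h = 3, kw = 2, kh = 2, st = 2;
    const int out_w = (in_w - 1) * st + kw;
    const int out_h = (in_h - 1) * st + kh;

    float h_in[in_w * in_h], h_k[kw * kh], h_out[out_w * out_h];
    for (int i = 0; i < in_w * in_h; ++i) h_in[i] = 1.0f;
    for (int i = 0; i < kw * kh;   ++i) h_k[i]  = 1.0f;

    float *d_in, *d_k, *d_out;
    cudaMalloc(&d_in,  sizeof(h_in));
    cudaMalloc(&d_k,   sizeof(h_k));
    cudaMalloc(&d_out, sizeof(h_out));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    cudaMemcpy(d_k,  h_k,  sizeof(h_k),  cudaMemcpyHostToDevice);

    const int n_out    = out_w * out_h;
    const int n_blocks = (n_out + CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE - 1) / CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE;
    conv2d_transpose_naive<<<n_blocks, CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE>>>(
        d_in, d_k, d_out, in_w, in_h, kw, kh, out_w, out_h, st);

    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("out[0][0] = %f\n", h_out[0]); // expected 1.0 for all-ones input/kernel

    cudaFree(d_in); cudaFree(d_k); cudaFree(d_out);
    return 0;
}

The gather formulation (each output thread loops over kernel taps and checks which input samples align with the stride) avoids atomics entirely, at the cost of some wasted work on non-aligned taps; that trade-off is a common starting point for transposed-convolution kernels.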