opencl: initial q8_0 mv support (#15732)

Author: lhez
Date: 2025-09-21 14:48:44 -07:00
Committed by: GitHub
Parent: da30ab5f86
Commit: c4510dc937
7 changed files with 1115 additions and 9 deletions

View File: ggml/src/ggml-opencl/CMakeLists.txt

@@ -82,9 +82,13 @@ set(GGML_OPENCL_KERNELS
mul_mv_q4_0_f32_1d_8x_flat
mul_mv_q4_0_f32_1d_16x_flat
mul_mv_q6_k
mul_mv_q8_0_f32
mul_mv_q8_0_f32_flat
mul_mv_mxfp4_f32
mul_mv_mxfp4_f32_flat
mul_mv_id_q4_0_f32_8x_flat
mul_mv_id_q8_0_f32
mul_mv_id_q8_0_f32_flat
mul_mv_id_mxfp4_f32
mul_mv_id_mxfp4_f32_flat
mul_mm_f32_f32_l4_lm

View File: ggml/src/ggml-opencl/ggml-opencl.cpp

@@ -367,6 +367,7 @@ struct ggml_backend_opencl_context {
cl_program program_mul_mv_q4_0_f32_1d_8x_flat;
cl_program program_mul_mv_q4_0_f32_1d_16x_flat;
cl_program program_mul_mv_q6_K;
cl_program program_mul_mv_q8_0_f32, program_mul_mv_q8_0_f32_flat;
cl_program program_mul_mv_mxfp4_f32;
cl_program program_mul_mv_mxfp4_f32_flat;
cl_program program_mul_mv_f16_f16;
@@ -402,6 +403,7 @@ struct ggml_backend_opencl_context {
cl_program program_conv_2d_f16_f32;
cl_program program_tsembd;
cl_program program_mul_mv_id_q4_0_f32_8x_flat;
cl_program program_mul_mv_id_q8_0_f32, program_mul_mv_id_q8_0_f32_flat;
cl_program program_mul_mv_id_mxfp4_f32;
cl_program program_mul_mv_id_mxfp4_f32_flat;
cl_program program_mul_mm_f32_f32_l4_lm;
@@ -450,11 +452,13 @@ struct ggml_backend_opencl_context {
cl_kernel kernel_mul_mat_q4_0_f32, kernel_mul_mat_q4_0_f32_v;
cl_kernel kernel_convert_block_q4_0, kernel_restore_block_q4_0;
cl_kernel kernel_convert_block_mxfp4, kernel_restore_block_mxfp4;
cl_kernel kernel_convert_block_q8_0, kernel_restore_block_q8_0;
cl_kernel kernel_mul_mat_q4_0_f32_8x_flat;
cl_kernel kernel_convert_block_q4_0_noshuffle;
cl_kernel kernel_mul_mat_q4_0_f32_1d_8x_flat, kernel_mul_mat_q4_0_f32_1d_16x_flat;
cl_kernel kernel_mul_mv_q6_K_f32;
cl_kernel kernel_mul_mv_mxfp4_f32, kernel_mul_mv_mxfp4_f32_flat;
cl_kernel kernel_mul_mv_q8_0_f32, kernel_mul_mv_q8_0_f32_flat;
cl_kernel kernel_im2col_f32, kernel_im2col_f16;
cl_kernel kernel_argsort_f32_i32;
cl_kernel kernel_sum_rows_f32;
@@ -471,6 +475,7 @@ struct ggml_backend_opencl_context {
cl_kernel kernel_conv_2d_f16_f32;
cl_kernel kernel_timestep_embedding;
cl_kernel kernel_mul_mv_id_q4_0_f32_8x_flat;
cl_kernel kernel_mul_mv_id_q8_0_f32, kernel_mul_mv_id_q8_0_f32_flat;
cl_kernel kernel_mul_mv_id_mxfp4_f32;
cl_kernel kernel_mul_mv_id_mxfp4_f32_flat;
cl_kernel kernel_mul_mm_f32_f32_l4_lm;
@@ -769,8 +774,10 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
CL_CHECK((backend_ctx->kernel_convert_block_q4_0_noshuffle = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q4_0_noshuffle", &err), err));
CL_CHECK((backend_ctx->kernel_convert_block_q4_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q4_0", &err), err));
CL_CHECK((backend_ctx->kernel_restore_block_q4_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q4_0", &err), err));
CL_CHECK((backend_ctx->kernel_convert_block_mxfp4 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_mxfp4", &err), err));
CL_CHECK((backend_ctx->kernel_restore_block_mxfp4 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_mxfp4", &err), err));
CL_CHECK((backend_ctx->kernel_convert_block_q8_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q8_0", &err), err));
CL_CHECK((backend_ctx->kernel_restore_block_q8_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q8_0", &err), err));
GGML_LOG_CONT(".");
}
@@ -992,6 +999,38 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
GGML_LOG_CONT(".");
}
// mul_mv_q8_0_f32
{
#ifdef GGML_OPENCL_EMBED_KERNELS
const std::string kernel_src {
#include "mul_mv_q8_0_f32.cl.h"
};
#else
const std::string kernel_src = read_file("mul_mv_q8_0_f32.cl");
#endif
backend_ctx->program_mul_mv_q8_0_f32 =
build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
CL_CHECK((backend_ctx->kernel_mul_mv_q8_0_f32 = clCreateKernel(backend_ctx->program_mul_mv_q8_0_f32, "kernel_mul_mv_q8_0_f32", &err), err));
GGML_LOG_CONT(".");
}
// mul_mv_q8_0_f32_flat
{
#ifdef GGML_OPENCL_EMBED_KERNELS
const std::string kernel_src {
#include "mul_mv_q8_0_f32_flat.cl.h"
};
#else
const std::string kernel_src = read_file("mul_mv_q8_0_f32_flat.cl");
#endif
backend_ctx->program_mul_mv_q8_0_f32_flat =
build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
CL_CHECK((backend_ctx->kernel_mul_mv_q8_0_f32_flat = clCreateKernel(backend_ctx->program_mul_mv_q8_0_f32_flat, "kernel_mul_mv_q8_0_f32_flat", &err), err));
GGML_LOG_CONT(".");
}
// mul_mv_mxfp4_f32
{
#ifdef GGML_OPENCL_EMBED_KERNELS
@@ -1733,6 +1772,38 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
GGML_LOG_CONT(".");
}
// mul_mv_id_q8_0_f32
{
#ifdef GGML_OPENCL_EMBED_KERNELS
const std::string kernel_src {
#include "mul_mv_id_q8_0_f32.cl.h"
};
#else
const std::string kernel_src = read_file("mul_mv_id_q8_0_f32.cl");
#endif
backend_ctx->program_mul_mv_id_q8_0_f32 =
build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
CL_CHECK((backend_ctx->kernel_mul_mv_id_q8_0_f32 = clCreateKernel(backend_ctx->program_mul_mv_id_q8_0_f32, "kernel_mul_mv_id_q8_0_f32", &err), err));
GGML_LOG_CONT(".");
}
// mul_mv_id_q8_0_f32_flat
{
#ifdef GGML_OPENCL_EMBED_KERNELS
const std::string kernel_src {
#include "mul_mv_id_q8_0_f32_flat.cl.h"
};
#else
const std::string kernel_src = read_file("mul_mv_id_q8_0_f32_flat.cl");
#endif
backend_ctx->program_mul_mv_id_q8_0_f32_flat =
build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
CL_CHECK((backend_ctx->kernel_mul_mv_id_q8_0_f32_flat = clCreateKernel(backend_ctx->program_mul_mv_id_q8_0_f32_flat, "kernel_mul_mv_id_q8_0_f32_flat", &err), err));
GGML_LOG_CONT(".");
}
// mul_mv_id_mxfp4_f32
{
#ifdef GGML_OPENCL_EMBED_KERNELS
@@ -2463,10 +2534,8 @@ struct ggml_tensor_extra_cl_mxfp4 {
CL_CHECK(clReleaseMemObject(q_img));
q = nullptr;
}
// Currently, q_img and d_img are only initialized when SMALL_ALLOC is
// enabled. They point to the images in ggml_backend_opencl_buffer_context.
// So, there is no need to release them here.
// TODO: initialize them for non SMALL_PATH path, or remove them.
// Currently, q_img and d_img are not used. They can be image1d_buffer_t
// that wraps around q and d to utilize image access path.
q_img = nullptr;
e_img = nullptr;
size_q = 0;
@@ -2474,6 +2543,41 @@ struct ggml_tensor_extra_cl_mxfp4 {
}
};
struct ggml_tensor_extra_cl_q8_0 {
cl_mem q = nullptr;
cl_mem q_img = nullptr;
cl_mem d = nullptr;
cl_mem d_img = nullptr;
size_t size_q = 0;
size_t size_d = 0;
~ggml_tensor_extra_cl_q8_0() {
reset();
}
void reset() {
// q and d are sub-buffers into the larger buffer allocated in ggml_backend_buffer.
// They must be released so that the parent buffer itself can be
// released without leaking memory.
if (q != nullptr) {
CL_CHECK(clReleaseMemObject(q));
q = nullptr;
}
if (d != nullptr) {
CL_CHECK(clReleaseMemObject(d));
d = nullptr;
}
// Currently, q_img and d_img are not used. They can be image1d_buffer_t
// that wraps around q and d to utilize image access path.
q_img = nullptr;
d_img = nullptr;
size_q = 0;
size_d = 0;
}
};
//------------------------------------------------------------------------------
// Backend API
//------------------------------------------------------------------------------
@@ -2807,10 +2911,13 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te
} else if (op->src[0]->type == GGML_TYPE_Q4_0 || op->src[0]->type == GGML_TYPE_MXFP4 ||
op->src[0]->type == GGML_TYPE_Q6_K) {
return op->src[1]->type == GGML_TYPE_F32 && ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]);
} else if (op->src[0]->type == GGML_TYPE_Q8_0) {
return op->src[1]->type == GGML_TYPE_F32;
}
return false;
case GGML_OP_MUL_MAT_ID:
if (op->src[0]->type == GGML_TYPE_Q4_0 ||
op->src[0]->type == GGML_TYPE_Q8_0 ||
op->src[0]->type == GGML_TYPE_MXFP4) {
if (op->src[1]->type == GGML_TYPE_F32) {
return ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]);
@@ -2983,6 +3090,12 @@ struct ggml_backend_opencl_buffer_context {
for (ggml_tensor_extra_cl_mxfp4 * e : temp_tensor_extras_mxfp4_in_use) {
delete e;
}
for (ggml_tensor_extra_cl_q8_0 * e : temp_tensor_extras_q8_0) {
delete e;
}
for (ggml_tensor_extra_cl_q8_0 * e : temp_tensor_extras_q8_0_in_use) {
delete e;
}
}
ggml_tensor_extra_cl * ggml_opencl_alloc_temp_tensor_extra() {
@@ -3030,6 +3143,21 @@ struct ggml_backend_opencl_buffer_context {
return extra;
}
ggml_tensor_extra_cl_q8_0 * ggml_opencl_alloc_temp_tensor_extra_q8_0() {
ggml_tensor_extra_cl_q8_0 * extra;
if (temp_tensor_extras_q8_0.empty()) {
extra = new ggml_tensor_extra_cl_q8_0();
} else {
extra = temp_tensor_extras_q8_0.back();
temp_tensor_extras_q8_0.pop_back();
}
temp_tensor_extras_q8_0_in_use.push_back(extra);
extra->reset();
return extra;
}
void reset() {
for (ggml_tensor_extra_cl * e : temp_tensor_extras_in_use) {
temp_tensor_extras.push_back(e);
@@ -3045,6 +3173,11 @@ struct ggml_backend_opencl_buffer_context {
temp_tensor_extras_mxfp4.push_back(e);
}
temp_tensor_extras_mxfp4_in_use.clear();
for (ggml_tensor_extra_cl_q8_0 * e : temp_tensor_extras_q8_0_in_use) {
temp_tensor_extras_q8_0.push_back(e);
}
temp_tensor_extras_q8_0_in_use.clear();
}
// Pools for extras. Available extras are in `temp_tensor_extras`. Extras
@@ -3058,6 +3191,8 @@ struct ggml_backend_opencl_buffer_context {
std::vector<ggml_tensor_extra_cl_q4_0 *> temp_tensor_extras_q4_0_in_use;
std::vector<ggml_tensor_extra_cl_mxfp4 *> temp_tensor_extras_mxfp4;
std::vector<ggml_tensor_extra_cl_mxfp4 *> temp_tensor_extras_mxfp4_in_use;
std::vector<ggml_tensor_extra_cl_q8_0 *> temp_tensor_extras_q8_0;
std::vector<ggml_tensor_extra_cl_q8_0 *> temp_tensor_extras_q8_0_in_use;
// The buffer_context is initially created by ggml_backend_buft_alloc_buffer
// before any tensor is initialized (at the beginning of alloc_tensor_range).
@@ -3470,6 +3605,65 @@ static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer,
tensor->extra = extra;
return;
}
if (tensor->type == GGML_TYPE_Q8_0) {
ggml_tensor_extra_cl * extra_orig = (ggml_tensor_extra_cl *)tensor->extra;
GGML_ASSERT(extra_orig && "Tesnors in OpenCL backend should have been allocated and initialized");
// Allocate the new extra and create aliases from the original.
ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
ggml_tensor_extra_cl_q8_0 * extra = ctx->ggml_opencl_alloc_temp_tensor_extra_q8_0();
size_t size_d = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*sizeof(ggml_fp16_t);
size_t size_q = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*(ggml_blck_size(tensor->type)*sizeof(char));
GGML_ASSERT(size_d + size_q == ggml_nbytes(tensor) && "Incorrect tensor size");
cl_int err;
cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE,
ggml_nbytes(tensor), NULL, &err);
CL_CHECK(err);
CL_CHECK(clEnqueueWriteBuffer(
queue, data_device, CL_TRUE, 0,
ggml_nbytes(tensor), data, 0, NULL, NULL));
// The original tensor memory is divided into scales and quants, i.e.,
// we first store scales, then quants.
cl_buffer_region region;
// Create subbuffer for scales.
region.origin = align_to(extra_orig->offset + tensor->view_offs + offset, backend_ctx->alignment);
region.size = size_d;
extra->d = clCreateSubBuffer(
extra_orig->data_device, CL_MEM_READ_WRITE,
CL_BUFFER_CREATE_TYPE_REGION, &region, &err);
CL_CHECK(err);
auto previous_origin = region.origin;
// Create subbuffer for quants.
region.origin = align_to(previous_origin + size_d, backend_ctx->alignment);
region.size = size_q;
extra->q = clCreateSubBuffer(
extra_orig->data_device, CL_MEM_READ_WRITE,
CL_BUFFER_CREATE_TYPE_REGION, &region, &err);
CL_CHECK(err);
cl_kernel kernel = backend_ctx->kernel_convert_block_q8_0;
CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &data_device));
CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->q));
CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->d));
size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
size_t local_work_size[] = {64, 1, 1};
cl_event evt;
CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt));
CL_CHECK(clWaitForEvents(1, &evt));
CL_CHECK(clReleaseMemObject(data_device));
tensor->extra = extra;
return;
}
#endif // GGML_OPENCL_SOA_Q
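
For reference, each Q8_0 block covers QK8_0 = 32 elements and stores one fp16 scale plus 32 int8 quants, i.e. 34 bytes, which is where the size_d/size_q split above comes from; the align_to() calls are needed because clCreateSubBuffer requires region origins to respect the device's base address alignment. A minimal host-side sketch of the same size arithmetic (plain C; n_elements stands in for ggml_nelements(tensor)):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

enum { QK8_0 = 32 };

static void q8_0_split_sizes(int64_t n_elements, size_t * size_d, size_t * size_q) {
    const int64_t n_blocks = n_elements / QK8_0;     // one block per 32 elements
    *size_d = (size_t) n_blocks * sizeof(uint16_t);  // fp16 scales, stored first
    *size_q = (size_t) n_blocks * QK8_0;             // int8 quants, stored second
    // together these cover the full ggml_nbytes() of the tensor (34 bytes/block)
    assert(*size_d + *size_q == (size_t) n_blocks * 34);
}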
@@ -3543,6 +3737,32 @@ static void ggml_backend_opencl_buffer_get_tensor(ggml_backend_buffer_t buffer,
size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
size_t local_work_size[] = {1, 1, 1};
cl_event evt;
CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL,
global_work_size, local_work_size, 0, NULL, &evt));
CL_CHECK(clWaitForEvents(1, &evt));
CL_CHECK(clEnqueueReadBuffer(
queue, data_device, CL_TRUE, offset,
size, data, 0, NULL, NULL));
CL_CHECK(clReleaseMemObject(data_device));
return;
}
if (tensor->type == GGML_TYPE_Q8_0) {
ggml_tensor_extra_cl_q8_0 * extra = (ggml_tensor_extra_cl_q8_0 *)tensor->extra;
cl_int err;
cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE,
ggml_nbytes(tensor), NULL, &err);
CL_CHECK(err);
cl_kernel kernel = backend_ctx->kernel_restore_block_q8_0;
CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->q));
CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->d));
CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &data_device));
size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
size_t local_work_size[] = {1, 1, 1};
cl_event evt;
CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL,
global_work_size, local_work_size, 0, NULL, &evt));
@@ -6268,6 +6488,7 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co
#ifdef GGML_OPENCL_SOA_Q
ggml_tensor_extra_cl_q4_0 * extra0_q4_0 = (ggml_tensor_extra_cl_q4_0 *)src0->extra;
ggml_tensor_extra_cl_mxfp4 * extra0_mxfp4 = (ggml_tensor_extra_cl_mxfp4 *)src0->extra;
ggml_tensor_extra_cl_q8_0 * extra0_q8_0 = (ggml_tensor_extra_cl_q8_0 *)src0->extra;
#endif
const int ne00 = src0 ? src0->ne[0] : 0;
@@ -6937,7 +7158,84 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co
#endif // GGML_OPENCL_SOA_Q
break;
case GGML_TYPE_Q4_1:
case GGML_TYPE_Q8_0:
case GGML_TYPE_Q8_0: {
#ifdef GGML_OPENCL_SOA_Q
kernel = backend_ctx->kernel_mul_mv_q8_0_f32_flat;
// nth0 - subgroup size
// nth1 - number of subgroups per workgroup
// ndst - number of output values per workgroup = output per subgroup * number of subgroups
if (backend_ctx->gpu_family == INTEL) {
nth0 = 16;
nth1 = 2;
ndst = nth1*4;
} else if (backend_ctx->gpu_family == ADRENO) {
nth0 = 64;
nth1 = 2;
ndst = nth1*4;
} else {
GGML_ASSERT(false && "TODO: Unknown GPU");
}
CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q8_0->q));
CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q8_0->d));
CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device));
CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device));
CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd));
CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00));
CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01));
CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb01));
CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb02));
CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb03));
CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne12));
CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb11));
CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb12));
CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb13));
CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne0));
CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne1));
CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r2));
CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &r3));
#else
kernel = backend_ctx->kernel_mul_mv_q8_0_f32;
// nth0 - subgroup size
// nth1 - number of subgroups per workgroup
// ndst - number of output values per workgroup = output per subgroup * number of subgroups
if (backend_ctx->gpu_family == INTEL) {
nth0 = 16;
nth1 = 2;
ndst = nth1*4;
} else if (backend_ctx->gpu_family == ADRENO) {
nth0 = 64;
nth1 = 2;
ndst = nth1*4;
} else {
GGML_ASSERT(false && "TODO: Unknown GPU");
}
CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device));
CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device));
CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd));
CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00));
CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01));
CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb01));
CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb02));
CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb03));
CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne12));
CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb11));
CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb12));
CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb13));
CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne0));
CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne1));
CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r2));
CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &r3));
#endif // GGML_OPENCL_SOA_Q
break;
}
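
The matching clEnqueueNDRangeKernel call is outside this hunk; as a hedged sketch, the nth0/nth1/ndst comments above imply an NDRange along these lines (one workgroup = nth1 subgroups of nth0 lanes producing ndst output rows; ne13 is the usual fourth batch dimension, assumed to be available on the host side):

// Sketch only: the real launch code is not part of this diff.
const int n_groups_r0 = (ne01 + ndst - 1)/ndst;  // workgroups over src0 rows
size_t global_work_size[3] = {
    (size_t) n_groups_r0*nth0,   // dim 0: row workgroups, nth0 lanes per subgroup
    (size_t) ne11*nth1,          // dim 1: one workgroup column per src1 column
    (size_t) ne12*ne13           // dim 2: flattened batch, split as im%ne12, im/ne12
};
size_t local_work_size[3] = { (size_t) nth0, (size_t) nth1, 1 };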
case GGML_TYPE_Q2_K:
case GGML_TYPE_Q3_K:
case GGML_TYPE_Q4_K:
@@ -7115,6 +7413,7 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0,
#ifdef GGML_OPENCL_SOA_Q
ggml_tensor_extra_cl_q4_0 * extra0_q4_0 = (ggml_tensor_extra_cl_q4_0 *)src0->extra;
ggml_tensor_extra_cl_mxfp4 * extra0_mxfp4 = (ggml_tensor_extra_cl_mxfp4 *)src0->extra;
ggml_tensor_extra_cl_q8_0 * extra0_q8_0 = (ggml_tensor_extra_cl_q8_0 *)src0->extra;
#endif
const int ne00 = src0->ne[0];
@@ -7202,6 +7501,82 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0,
break;
}
case GGML_TYPE_Q8_0: {
#ifdef GGML_OPENCL_SOA_Q
kernel = backend_ctx->kernel_mul_mv_id_q8_0_f32_flat;
if (backend_ctx->gpu_family == INTEL) {
sgs = 16;
nsg = 2;
ndst = 4;
} else if (backend_ctx->gpu_family == ADRENO) {
sgs = 64;
nsg = 2;
ndst = 4;
} else {
GGML_ASSERT(false && "TODO: Unknown GPU");
}
CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q8_0->q));
CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q8_0->d));
CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device));
CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device));
CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2));
CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device));
CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd));
CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00));
CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne01));
CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb01));
CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb02));
CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne11));
CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne12));
CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb11));
CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb12));
CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne20));
CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne21));
CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb21));
CL_CHECK(clSetKernelArg(kernel, 19, sizeof(int), &ne0));
CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne1));
#else
kernel = backend_ctx->kernel_mul_mv_id_q8_0_f32;
if (backend_ctx->gpu_family == INTEL) {
sgs = 16;
nsg = 2;
ndst = 4;
} else if (backend_ctx->gpu_family == ADRENO) {
sgs = 64;
nsg = 2;
ndst = 4;
} else {
GGML_ASSERT(false && "TODO: Unknown GPU");
}
CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device));
CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device));
CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2));
CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device));
CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd));
CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00));
CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne01));
CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb01));
CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb02));
CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne11));
CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne12));
CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb11));
CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb12));
CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne20));
CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne21));
CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb21));
CL_CHECK(clSetKernelArg(kernel, 19, sizeof(int), &ne0));
CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne1));
#endif // GGML_OPENCL_SOA_Q
break;
}
case GGML_TYPE_MXFP4: {
#ifdef GGML_OPENCL_SOA_Q
kernel = backend_ctx->kernel_mul_mv_id_mxfp4_f32_flat;

View File: ggml/src/ggml-opencl/kernels/cvt.cl

@@ -117,9 +117,8 @@ kernel void kernel_convert_block_q4_0_noshuffle(
}
}
//------------------------------------------------------------------------------
// block_q4_0
// block_mxfp4
//------------------------------------------------------------------------------
#define QK_MXFP4 32
struct block_mxfp4 {
@@ -162,3 +161,42 @@ kernel void kernel_restore_block_mxfp4(
b->qs[i] = q[i];
}
}
//------------------------------------------------------------------------------
// block_q8_0
//------------------------------------------------------------------------------
typedef struct {
half d; // delta
char qs[QK8_0]; // quants
} block_q8_0;
kernel void kernel_convert_block_q8_0(
global block_q8_0 * src0,
global uchar * dst_q,
global half * dst_d
) {
global block_q8_0 * b = (global block_q8_0 *) src0 + get_global_id(0);
global uchar * q = (global uchar *) dst_q + QK8_0*get_global_id(0);
global half * d = (global half *) dst_d + get_global_id(0);
*d = b->d;
for (int i = 0; i < QK8_0; ++i) {
q[i] = b->qs[i];
}
}
kernel void kernel_restore_block_q8_0(
global uchar * src_q,
global half * src_d,
global block_q8_0 * dst
) {
global block_q8_0 * b = (global block_q8_0 *) dst + get_global_id(0);
global uchar * q = (global uchar *) src_q + QK8_0*get_global_id(0);
global half * d = (global half *) src_d + get_global_id(0);
b->d = *d;
for (int i = 0; i < QK8_0; ++i) {
b->qs[i] = q[i];
}
}
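
A CPU reference of this convert/restore round trip can help when validating the kernels above; a plain-C sketch (the struct mirrors the kernel-side block_q8_0, and the fp16 scale is kept as raw bits since repacking needs no arithmetic):

#include <stdint.h>
#include <string.h>

#define QK8_0 32

typedef struct {
    uint16_t d;            // raw fp16 bits of the scale
    int8_t   qs[QK8_0];    // quants
} block_q8_0_host;

// scales into dst_d (tightly packed), quants into dst_q (QK8_0 bytes per block)
static void convert_q8_0_host(const block_q8_0_host * src, int n_blocks,
                              uint8_t * dst_q, uint16_t * dst_d) {
    for (int i = 0; i < n_blocks; ++i) {
        dst_d[i] = src[i].d;
        memcpy(dst_q + (size_t)i*QK8_0, src[i].qs, QK8_0);
    }
}

static void restore_q8_0_host(const uint8_t * src_q, const uint16_t * src_d,
                              block_q8_0_host * dst, int n_blocks) {
    for (int i = 0; i < n_blocks; ++i) {
        dst[i].d = src_d[i];
        memcpy(dst[i].qs, src_q + (size_t)i*QK8_0, QK8_0);
    }
}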

View File: ggml/src/ggml-opencl/kernels/mul_mv_id_q8_0_f32.cl

@@ -0,0 +1,140 @@
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#ifdef cl_intel_subgroups
#pragma OPENCL EXTENSION cl_intel_subgroups : enable
#else
#pragma OPENCL EXTENSION cl_khr_subgroups : enable
#endif
#ifdef cl_intel_required_subgroup_size
#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable
#define INTEL_GPU 1
#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16)))
#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32)))
#elif defined(cl_qcom_reqd_sub_group_size)
#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
#define ADRENO_GPU 1
#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half")))
#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
#endif
#define QK8_0 32
typedef struct {
half d; // delta
char qs[QK8_0]; // quants
} block_q8_0;
#define NB_Q8_0 8
#ifdef INTEL_GPU
#define N_R0_Q8_0 4 // number of rows each subgroup works on
#define N_SG_Q8_0 2 // number of subgroups in a work group
#define N_SIMDWIDTH 16 // subgroup size
#elif defined (ADRENO_GPU)
#define N_R0_Q8_0 4
#define N_SG_Q8_0 2
#define N_SIMDWIDTH 64
#endif
#ifdef INTEL_GPU
REQD_SUBGROUP_SIZE_16
#elif defined (ADRENO_GPU)
REQD_SUBGROUP_SIZE_64
#endif
kernel void kernel_mul_mv_id_q8_0_f32(
global char * src0,
ulong offset0,
global char * src1,
ulong offset1,
global char * src2,
ulong offset2,
global char * dst,
ulong offsetd,
int ne00,
int ne01,
ulong nb01,
ulong nb02,
int ne11,
int ne12,
ulong nb11,
ulong nb12,
int ne20,
int ne21,
ulong nb21,
int ne0,
int ne1
) {
src0 = (global char *)((global char *)src0 + offset0);
src1 = (global char *)((global char *)src1 + offset1);
src2 = (global char *)((global char *)src2 + offset2);
dst = (global char *)((global char *)dst + offsetd);
int iid1 = get_group_id(2)/ne20;
int idx = get_group_id(2)%ne20;
int i02 = ((global int *) (src2 + iid1*nb21))[idx];
int i11_ = idx % ne11;
int i12_ = iid1;
int i1 = idx;
int i2 = i12_;
global char * src0_cur = src0 + i02*nb02;
global char * src1_cur = src1 + i11_*nb11 + i12_*nb12;
global char * dst_cur = dst + (i1*ne0 + i2*ne1*ne0)*sizeof(float);
int nb = ne00/QK8_0;
int r0 = get_group_id(0);
int r1 = get_group_id(1);
int first_row = (r0*N_SG_Q8_0 + get_sub_group_id()) * N_R0_Q8_0;
ulong offset_src1 = r1*nb11;
global float * y = (global float *) (src1_cur + offset_src1);
// pointers to src0 rows
global block_q8_0 * ax[N_R0_Q8_0];
for (int row = 0; row < N_R0_Q8_0; ++row) {
ulong offset_src0 = (first_row + row)*nb01;
ax[row] = (global block_q8_0 *) ((global char *) src0_cur + offset_src0);
}
float yl[NB_Q8_0];
float sumf[N_R0_Q8_0] = { 0.f };
const short ix = get_sub_group_local_id()/4;
const short il = get_sub_group_local_id()%4;
global float * yb = y + ix*QK8_0 + il*NB_Q8_0;
// each thread handles NB_Q8_0 quants at a time
for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/4) {
for (short i = 0; i < NB_Q8_0; ++i) {
yl[i] = yb[i];
}
for (short row = 0; row < N_R0_Q8_0; row++) {
global char * qs = ax[row][ib].qs + il*NB_Q8_0;
float sumq = 0.f;
for (short iq = 0; iq < NB_Q8_0; ++iq) {
sumq += qs[iq] * yl[iq];
}
sumf[row] += sumq*ax[row][ib].d;
}
yb += N_SIMDWIDTH*NB_Q8_0;
}
global float * dst_f32 = (global float *) dst_cur + (ulong)r1*ne0;
for (int row = 0; row < N_R0_Q8_0; ++row) {
float tot = sub_group_reduce_add(sumf[row]);
if (get_sub_group_local_id() == 0 && first_row + row < ne01) {
dst_f32[first_row + row] = tot;
}
}
}
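
The ix/il split above assigns four lanes to each 32-quant block, eight quants per lane; this standalone program prints the mapping for the 16-lane INTEL_GPU configuration to make it concrete:

#include <stdio.h>

int main(void) {
    const int N_SIMDWIDTH = 16, NB_Q8_0 = 8;   // INTEL_GPU configuration
    for (int lane = 0; lane < N_SIMDWIDTH; ++lane) {
        const int ix = lane / 4;   // first block this lane touches
        const int il = lane % 4;   // which 8-quant slice within a block
        printf("lane %2d: blocks %d, %d, ... (stride %d), quants [%2d..%2d)\n",
               lane, ix, ix + N_SIMDWIDTH/4, N_SIMDWIDTH/4,
               il*NB_Q8_0, (il + 1)*NB_Q8_0);
    }
    return 0;
}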

View File: ggml/src/ggml-opencl/kernels/mul_mv_id_q8_0_f32_flat.cl

@@ -0,0 +1,222 @@
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#ifdef cl_intel_subgroups
#pragma OPENCL EXTENSION cl_intel_subgroups : enable
#else
#pragma OPENCL EXTENSION cl_khr_subgroups : enable
#endif
#ifdef cl_intel_required_subgroup_size
#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable
#define INTEL_GPU 1
#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16)))
#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32)))
#elif defined(cl_qcom_reqd_sub_group_size)
#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
#define ADRENO_GPU 1
#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half")))
#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
#endif
#define QK8_0 32
typedef struct {
half d; // delta
char qs[QK8_0]; // quants
} block_q8_0;
#define NB_Q8_0 8
#ifdef INTEL_GPU
#define N_R0_Q8_0 4 // number of rows each subgroup works on
#define N_SG_Q8_0 2 // number of subgroups in a work group
#define N_SIMDWIDTH 16 // subgroup size
#elif defined (ADRENO_GPU)
#define N_R0_Q8_0 4
#define N_SG_Q8_0 2
#define N_SIMDWIDTH 64
#endif
#ifdef INTEL_GPU
REQD_SUBGROUP_SIZE_16
#elif defined (ADRENO_GPU)
REQD_SUBGROUP_SIZE_64
#endif
kernel void kernel_mul_mv_id_q8_0_f32_flat(
global char * src0_q,
global half * src0_d,
global char * src1,
ulong offset1,
global char * src2,
ulong offset2,
global char * dst,
ulong offsetd,
int ne00,
int ne01,
ulong nb01,
ulong nb02,
int ne11,
int ne12,
ulong nb11,
ulong nb12,
int ne20,
int ne21,
ulong nb21,
int ne0,
int ne1
) {
src1 = (global char *)((global char *)src1 + offset1);
src2 = (global char *)((global char *)src2 + offset2);
dst = (global char *)((global char *)dst + offsetd);
int iid1 = (int)get_group_id(2)/ne20;
int idx = (int)get_group_id(2)%ne20;
int i02 = ((global int *) (src2 + iid1*nb21))[idx];
int i11_ = idx % ne11;
int i12_ = iid1;
int i1 = idx;
int i2 = i12_;
// 34 == sizeof(block_q8_0)
uint src0_off = i02*nb02;
src0_off /= 34;
global char * src0_q_cur = src0_q + src0_off*sizeof(char)*QK8_0;
global half * src0_d_cur = src0_d + src0_off;
global char * src1_cur = src1 + i11_*nb11 + i12_*nb12;
global char * dst_cur = dst + (i1*ne0 + i2*ne1*ne0)*sizeof(float);
int nb = ne00/QK8_0;
int r0 = get_group_id(0);
int r1 = get_group_id(1);
int first_row = (r0*N_SG_Q8_0 + get_sub_group_id()) * N_R0_Q8_0;
ulong offset_src1 = r1*nb11;
global float * y = (global float *) (src1_cur + offset_src1);
// pointers to src0 rows
uint offset_src0_base = first_row*nb01;
global char * ax0, * ax1, * ax2, * ax3;
global half * ad0, * ad1, * ad2, * ad3;
uint offset_src0;
offset_src0 = offset_src0_base + 0*nb01;
offset_src0 = offset_src0/34;
ax0 = (global char *) ((global char *) src0_q_cur + offset_src0*sizeof(char)*QK8_0);
ad0 = (global half *) ((global char *) src0_d_cur + offset_src0*sizeof(half));
offset_src0 = offset_src0_base + 1*nb01;
offset_src0 = offset_src0/34;
ax1 = (global char *) ((global char *) src0_q_cur + offset_src0*sizeof(char)*QK8_0);
ad1 = (global half *) ((global char *) src0_d_cur + offset_src0*sizeof(half));
offset_src0 = offset_src0_base + 2*nb01;
offset_src0 = offset_src0/34;
ax2 = (global char *) ((global char *) src0_q_cur + offset_src0*sizeof(char)*QK8_0);
ad2 = (global half *) ((global char *) src0_d_cur + offset_src0*sizeof(half));
offset_src0 = offset_src0_base + 3*nb01;
offset_src0 = offset_src0/34;
ax3 = (global char *) ((global char *) src0_q_cur + offset_src0*sizeof(char)*QK8_0);
ad3 = (global half *) ((global char *) src0_d_cur + offset_src0*sizeof(half));
const short ix = get_sub_group_local_id()/4;
const short il = get_sub_group_local_id()%4;
global float * yb = y + ix*QK8_0 + il*NB_Q8_0;
float8 yl;
float8 qv;
float4 sumf = 0.f;
float sumq = 0.f;
global char * qs;
// each thread handles NB_Q8_0 quants at a time
for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/4) {
yl = vload8(0, yb);
qs = ax0 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0;
qv = convert_float8(vload8(0, qs));
sumq = 0;
sumq += qv.s0*yl.s0;
sumq += qv.s1*yl.s1;
sumq += qv.s2*yl.s2;
sumq += qv.s3*yl.s3;
sumq += qv.s4*yl.s4;
sumq += qv.s5*yl.s5;
sumq += qv.s6*yl.s6;
sumq += qv.s7*yl.s7;
sumf.s0 += sumq*ad0[ib];
qs = ax1 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0;
qv = convert_float8(vload8(0, qs));
sumq = 0;
sumq += qv.s0*yl.s0;
sumq += qv.s1*yl.s1;
sumq += qv.s2*yl.s2;
sumq += qv.s3*yl.s3;
sumq += qv.s4*yl.s4;
sumq += qv.s5*yl.s5;
sumq += qv.s6*yl.s6;
sumq += qv.s7*yl.s7;
sumf.s1 += sumq*ad1[ib];
qs = ax2 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0;
qv = convert_float8(vload8(0, qs));
sumq = 0;
sumq += qv.s0*yl.s0;
sumq += qv.s1*yl.s1;
sumq += qv.s2*yl.s2;
sumq += qv.s3*yl.s3;
sumq += qv.s4*yl.s4;
sumq += qv.s5*yl.s5;
sumq += qv.s6*yl.s6;
sumq += qv.s7*yl.s7;
sumf.s2 += sumq*ad2[ib];
qs = ax3 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0;
qv = convert_float8(vload8(0, qs));
sumq = 0;
sumq += qv.s0*yl.s0;
sumq += qv.s1*yl.s1;
sumq += qv.s2*yl.s2;
sumq += qv.s3*yl.s3;
sumq += qv.s4*yl.s4;
sumq += qv.s5*yl.s5;
sumq += qv.s6*yl.s6;
sumq += qv.s7*yl.s7;
sumf.s3 += sumq*ad3[ib];
yb += N_SIMDWIDTH*NB_Q8_0;
}
global float * dst_f32 = (global float *) dst_cur + (ulong)r1*ne0;
float4 tot = (float4)(
sub_group_reduce_add(sumf.s0),
sub_group_reduce_add(sumf.s1),
sub_group_reduce_add(sumf.s2),
sub_group_reduce_add(sumf.s3)
);
if (get_sub_group_local_id() == 0) {
if (first_row + 0 < ne01) {
dst_f32[first_row + 0] = tot.s0;
}
if (first_row + 1 < ne01) {
dst_f32[first_row + 1] = tot.s1;
}
if (first_row + 2 < ne01) {
dst_f32[first_row + 2] = tot.s2;
}
if (first_row + 3 < ne01) {
dst_f32[first_row + 3] = tot.s3;
}
}
}
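
The repeated offset_src0/34 converts a byte offset into a block index: for contiguous Q8_0 data nb01 is a whole number of 34-byte blocks, so the division is exact, and the block index then addresses the two flat arrays directly (blk*QK8_0 quant bytes, blk scales). Splitting scales from quants this way also gives vload8 a tightly packed int8 stream instead of the 34-byte interleaved stride, which is presumably the point of the _flat variants. A worked check:

#include <assert.h>

int main(void) {
    const int QK8_0 = 32, BLOCK_BYTES = 34;             // 2-byte scale + 32 quants
    const unsigned long ne00 = 4096;                    // example row width
    const unsigned long nb01 = ne00/QK8_0*BLOCK_BYTES;  // 128 blocks -> 4352 bytes
    const unsigned long off  = 3*nb01;                  // byte offset of row 3
    assert(off % BLOCK_BYTES == 0);                     // exact for contiguous rows
    const unsigned long blk  = off / BLOCK_BYTES;       // 384 == 3*128
    assert(blk == 3*(ne00/QK8_0));
    // Flat addressing: quants at blk*QK8_0 bytes, scale at index blk.
    return 0;
}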

View File: ggml/src/ggml-opencl/kernels/mul_mv_q8_0_f32.cl

@@ -0,0 +1,125 @@
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#ifdef cl_intel_subgroups
#pragma OPENCL EXTENSION cl_intel_subgroups : enable
#else
#pragma OPENCL EXTENSION cl_khr_subgroups : enable
#endif
#ifdef cl_intel_required_subgroup_size
#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable
#define INTEL_GPU 1
#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16)))
#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32)))
#elif defined(cl_qcom_reqd_sub_group_size)
#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
#define ADRENO_GPU 1
#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half")))
#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
#endif
#define QK8_0 32
typedef struct {
half d; // delta
char qs[QK8_0]; // quants
} block_q8_0;
#define NB_Q8_0 8
#ifdef INTEL_GPU
#define N_R0_Q8_0 4 // number of rows each subgroup works on
#define N_SG_Q8_0 2 // number of subgroups in a work group
#define N_SIMDWIDTH 16 // subgroup size
#elif defined (ADRENO_GPU)
#define N_R0_Q8_0 4
#define N_SG_Q8_0 2
#define N_SIMDWIDTH 64
#endif
#ifdef INTEL_GPU
REQD_SUBGROUP_SIZE_16
#elif defined (ADRENO_GPU)
REQD_SUBGROUP_SIZE_64
#endif
kernel void kernel_mul_mv_q8_0_f32(
global char * src0,
ulong offset0,
global char * src1,
ulong offset1,
global char * dst,
ulong offsetd,
int ne00,
int ne01,
ulong nb01,
ulong nb02,
ulong nb03,
int ne12,
ulong nb11,
ulong nb12,
ulong nb13,
int ne0,
int ne1,
int r2,
int r3
) {
src0 = (global char*)((global char*)src0 + offset0);
src1 = (global char*)((global char*)src1 + offset1);
dst = (global char*)((global char*)dst + offsetd);
int nb = ne00/QK8_0;
int r0 = get_group_id(0);
int r1 = get_group_id(1);
int im = get_group_id(2);
int first_row = (r0*N_SG_Q8_0 + get_sub_group_id()) * N_R0_Q8_0;
uint i12 = im%ne12;
uint i13 = im/ne12;
ulong offset_src1 = r1*nb11 + i12*nb12 + i13*nb13;
global float * y = (global float *) (src1 + offset_src1);
// pointers to src0 rows
global block_q8_0 * ax[N_R0_Q8_0];
for (int row = 0; row < N_R0_Q8_0; ++row) {
ulong offset_src0 = (first_row + row)*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03;
ax[row] = (global block_q8_0 *) ((global char *) src0 + offset_src0);
}
float yl[NB_Q8_0];
float sumf[N_R0_Q8_0] = { 0.f };
const short ix = get_sub_group_local_id()/4;
const short il = get_sub_group_local_id()%4;
global float * yb = y + ix*QK8_0 + il*NB_Q8_0;
// each thread handles NB_Q8_0 quants at a time
for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/4) {
for (short i = 0; i < NB_Q8_0; ++i) {
yl[i] = yb[i];
}
for (short row = 0; row < N_R0_Q8_0; row++) {
global char * qs = ax[row][ib].qs + il*NB_Q8_0;
float sumq = 0.f;
for (short iq = 0; iq < NB_Q8_0; ++iq) {
sumq += qs[iq] * yl[iq];
}
sumf[row] += sumq*ax[row][ib].d;
}
yb += N_SIMDWIDTH*NB_Q8_0;
}
global float * dst_f32 = (global float *) dst + (ulong)im*ne0*ne1 + (ulong)r1*ne0;
for (int row = 0; row < N_R0_Q8_0; ++row) {
float tot = sub_group_reduce_add(sumf[row]);
if (get_sub_group_local_id() == 0 && first_row + row < ne01) {
dst_f32[first_row + row] = tot;
}
}
}
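
r2 and r3 follow ggml's usual mul_mat broadcast convention, presumably r2 = ne12/ne02 and r3 = ne13/ne03 on the host side (not shown in this diff), so (i12/r2, i13/r3) maps each src1 batch back onto the src0 batch it shares:

#include <stdio.h>

int main(void) {
    const int ne02 = 2, ne12 = 8;  // src0: 2 batches, src1: 8 batches
    const int r2 = ne12 / ne02;    // broadcast ratio == 4
    for (int i12 = 0; i12 < ne12; ++i12) {
        printf("src1 batch %d -> src0 batch %d\n", i12, i12 / r2); // 0,0,0,0,1,1,1,1
    }
    return 0;
}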

View File: ggml/src/ggml-opencl/kernels/mul_mv_q8_0_f32_flat.cl

@@ -0,0 +1,202 @@
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#ifdef cl_intel_subgroups
#pragma OPENCL EXTENSION cl_intel_subgroups : enable
#else
#pragma OPENCL EXTENSION cl_khr_subgroups : enable
#endif
#ifdef cl_intel_required_subgroup_size
#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable
#define INTEL_GPU 1
#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16)))
#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32)))
#elif defined(cl_qcom_reqd_sub_group_size)
#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
#define ADRENO_GPU 1
#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half")))
#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
#endif
#define QK8_0 32
typedef struct {
half d; // delta
char qs[QK8_0]; // quants
} block_q8_0;
#define NB_Q8_0 8
#ifdef INTEL_GPU
#define N_R0_Q8_0 4 // number of rows each subgroup works on
#define N_SG_Q8_0 2 // number of subgroups in a work group
#define N_SIMDWIDTH 16 // subgroup size
#elif defined (ADRENO_GPU)
#define N_R0_Q8_0 4
#define N_SG_Q8_0 2
#define N_SIMDWIDTH 64
#endif
#ifdef INTEL_GPU
REQD_SUBGROUP_SIZE_16
#elif defined (ADRENO_GPU)
REQD_SUBGROUP_SIZE_64
#endif
kernel void kernel_mul_mv_q8_0_f32_flat(
global char * src0_q,
global half * src0_d,
global char * src1,
ulong offset1,
global char * dst,
ulong offsetd,
int ne00,
int ne01,
ulong nb01,
ulong nb02,
ulong nb03,
int ne12,
ulong nb11,
ulong nb12,
ulong nb13,
int ne0,
int ne1,
int r2,
int r3
) {
src1 = (global char*)((global char*)src1 + offset1);
dst = (global char*)((global char*)dst + offsetd);
int nb = ne00/QK8_0;
int r0 = get_group_id(0);
int r1 = get_group_id(1);
int im = get_group_id(2);
int first_row = (r0*N_SG_Q8_0 + get_sub_group_id()) * N_R0_Q8_0;
uint i12 = im%ne12;
uint i13 = im/ne12;
ulong offset_src1 = r1*nb11 + i12*nb12 + i13*nb13;
global float * y = (global float *) (src1 + offset_src1);
// pointers to src0 rows
uint offset_src0_base = first_row*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03;
global char * ax0, * ax1, * ax2, * ax3;
global half * ad0, * ad1, * ad2, * ad3;
uint offset_src0;
offset_src0 = offset_src0_base + 0*nb01;
offset_src0 = offset_src0/34;
ax0 = (global char *) ((global char *) src0_q + offset_src0*sizeof(char)*QK8_0);
ad0 = (global half *) ((global char *) src0_d + offset_src0*sizeof(half));
offset_src0 = offset_src0_base + 1*nb01;
offset_src0 = offset_src0/34;
ax1 = (global char *) ((global char *) src0_q + offset_src0*sizeof(char)*QK8_0);
ad1 = (global half *) ((global char *) src0_d + offset_src0*sizeof(half));
offset_src0 = offset_src0_base + 2*nb01;
offset_src0 = offset_src0/34;
ax2 = (global char *) ((global char *) src0_q + offset_src0*sizeof(char)*QK8_0);
ad2 = (global half *) ((global char *) src0_d + offset_src0*sizeof(half));
offset_src0 = offset_src0_base + 3*nb01;
offset_src0 = offset_src0/34;
ax3 = (global char *) ((global char *) src0_q + offset_src0*sizeof(char)*QK8_0);
ad3 = (global half *) ((global char *) src0_d + offset_src0*sizeof(half));
const short ix = get_sub_group_local_id()/4;
const short il = get_sub_group_local_id()%4;
global float * yb = y + ix*QK8_0 + il*NB_Q8_0;
float8 yl;
float8 qv;
float4 sumf = 0.f;
float sumq = 0.f;
global char * qs;
// each thread handles NB_Q8_0 quants at a time
for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/4) {
yl = vload8(0, yb);
qs = ax0 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0;
qv = convert_float8(vload8(0, qs));
sumq = 0;
sumq += qv.s0*yl.s0;
sumq += qv.s1*yl.s1;
sumq += qv.s2*yl.s2;
sumq += qv.s3*yl.s3;
sumq += qv.s4*yl.s4;
sumq += qv.s5*yl.s5;
sumq += qv.s6*yl.s6;
sumq += qv.s7*yl.s7;
sumf.s0 += sumq*ad0[ib];
qs = ax1 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0;
qv = convert_float8(vload8(0, qs));
sumq = 0;
sumq += qv.s0*yl.s0;
sumq += qv.s1*yl.s1;
sumq += qv.s2*yl.s2;
sumq += qv.s3*yl.s3;
sumq += qv.s4*yl.s4;
sumq += qv.s5*yl.s5;
sumq += qv.s6*yl.s6;
sumq += qv.s7*yl.s7;
sumf.s1 += sumq*ad1[ib];
qs = ax2 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0;
qv = convert_float8(vload8(0, qs));
sumq = 0;
sumq += qv.s0*yl.s0;
sumq += qv.s1*yl.s1;
sumq += qv.s2*yl.s2;
sumq += qv.s3*yl.s3;
sumq += qv.s4*yl.s4;
sumq += qv.s5*yl.s5;
sumq += qv.s6*yl.s6;
sumq += qv.s7*yl.s7;
sumf.s2 += sumq*ad2[ib];
qs = ax3 + ib*sizeof(char)*QK8_0 + il*NB_Q8_0;
qv = convert_float8(vload8(0, qs));
sumq = 0;
sumq += qv.s0*yl.s0;
sumq += qv.s1*yl.s1;
sumq += qv.s2*yl.s2;
sumq += qv.s3*yl.s3;
sumq += qv.s4*yl.s4;
sumq += qv.s5*yl.s5;
sumq += qv.s6*yl.s6;
sumq += qv.s7*yl.s7;
sumf.s3 += sumq*ad3[ib];
yb += N_SIMDWIDTH*NB_Q8_0;
}
global float * dst_f32 = (global float *) dst + (ulong)im*ne0*ne1 + (ulong)r1*ne0;
float4 tot = (float4)(
sub_group_reduce_add(sumf.s0),
sub_group_reduce_add(sumf.s1),
sub_group_reduce_add(sumf.s2),
sub_group_reduce_add(sumf.s3)
);
if (get_sub_group_local_id() == 0) {
if (first_row + 0 < ne01) {
dst_f32[first_row + 0] = tot.s0;
}
if (first_row + 1 < ne01) {
dst_f32[first_row + 1] = tot.s1;
}
if (first_row + 2 < ne01) {
dst_f32[first_row + 2] = tot.s2;
}
if (first_row + 3 < ne01) {
dst_f32[first_row + 3] = tot.s3;
}
}
}