mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2025-11-10 10:27:03 +00:00
opencl: optimize mxfp4 kernels (#16037)
- flatten mxfp4, and use a packed fp4->fp16 bit-wise conversion function (replacing the LUT)
- MoE kernel optimizations

Co-authored-by: Li He <lih@qti.qualcomm.com>
This commit is contained in:
@@ -116,3 +116,49 @@ kernel void kernel_convert_block_q4_0_noshuffle(
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
//------------------------------------------------------------------------------
// block_mxfp4
//------------------------------------------------------------------------------

// Number of 4-bit (FP4 E2M1) elements per MXFP4 block.
#define QK_MXFP4 32

// One MXFP4 block: a single shared E8M0 scale byte plus 32 packed FP4
// values, two per byte. sizeof(struct block_mxfp4) == 17 (1 + 16); the
// mul_mv kernels below rely on that value when converting byte strides
// into block indices.
struct block_mxfp4 {
    uchar e;                // E8M0
    uchar qs[QK_MXFP4 / 2]; // 32 packed 4-bit quants, two per byte
};
|
||||
|
||||
//------------------------------------------------------------------------------
// kernel_convert_block_mxfp4
// Convert the block_mxfp4 format to 2 separate arrays (AOS -> SOA).
// This kernel does not deshuffle the bits.
//------------------------------------------------------------------------------
kernel void kernel_convert_block_mxfp4(
    global struct block_mxfp4 * src0,
    global uchar * dst_q,
    global uchar * dst_e
) {
    const size_t gid = get_global_id(0);

    // One work-item handles one block.
    global struct block_mxfp4 * blk = src0 + gid;
    global uchar * out_q = dst_q + gid * (QK_MXFP4 / 2);

    // The E8M0 scale byte goes to its own contiguous array.
    dst_e[gid] = blk->e;

    // Packed 4-bit quants are copied verbatim (no bit deshuffling).
    for (int j = 0; j < QK_MXFP4 / 2; j++) {
        out_q[j] = blk->qs[j];
    }
}
|
||||
|
||||
//------------------------------------------------------------------------------
// kernel_restore_block_mxfp4
// Inverse of kernel_convert_block_mxfp4: reassemble the two separate arrays
// (SOA) back into interleaved block_mxfp4 structs (AOS).
//------------------------------------------------------------------------------
kernel void kernel_restore_block_mxfp4(
    global uchar * src_q,
    // E8M0 scale, one byte per block. Declared uchar to match what
    // kernel_convert_block_mxfp4 writes to dst_e (it was previously
    // declared `global half *` and immediately cast to uchar *).
    global uchar * src_e,
    global struct block_mxfp4 * dst
) {
    global struct block_mxfp4 * b = dst + get_global_id(0);
    global uchar * q = src_q + QK_MXFP4 / 2 * get_global_id(0);
    global uchar * e = src_e + get_global_id(0);

    b->e = *e;
    for (int i = 0; i < QK_MXFP4 / 2; ++i) {
        b->qs[i] = q[i];
    }
}
|
||||
|
||||
176
ggml/src/ggml-opencl/kernels/mul_mv_id_mxfp4_f32_flat.cl
Normal file
176
ggml/src/ggml-opencl/kernels/mul_mv_id_mxfp4_f32_flat.cl
Normal file
@@ -0,0 +1,176 @@
|
||||
#pragma OPENCL EXTENSION cl_khr_fp16 : enable

// Subgroup support: prefer the Intel extension when available, otherwise
// fall back to the Khronos one.
#ifdef cl_intel_subgroups
#pragma OPENCL EXTENSION cl_intel_subgroups : enable
#else
#pragma OPENCL EXTENSION cl_khr_subgroups : enable
#endif

// Vendor-specific required-subgroup-size attributes. The matching
// INTEL_GPU / ADRENO_GPU flags select the tuning constants below.
#ifdef cl_intel_required_subgroup_size
#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable
#define INTEL_GPU 1
#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16)))
#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32)))
#elif defined(cl_qcom_reqd_sub_group_size)
#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
#define ADRENO_GPU 1
#define REQD_SUBGROUP_SIZE_64  __attribute__((qcom_reqd_sub_group_size("half")))
#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
#endif

// Number of 4-bit elements per MXFP4 block (must match the host side).
#define QK_MXFP4 32
|
||||
|
||||
// Expand four packed FP4 (E2M1) values, stored in the four nibbles of
// `fp4x4` (lowest nibble first), into four IEEE fp16 values — purely with
// bit operations, replacing the previous LUT approach.
//
// FP4 E2M1 nibble layout: [s e e m] (sign, 2-bit exponent, 1-bit mantissa).
// fp16 layout: sign at bit 15, 5-bit exponent (bias 15) at bits 14-10,
// 10-bit mantissa at bits 9-0.
// Result lane i corresponds to input nibble i.
static inline half4 mxfp4_to_fp16_packed(ushort fp4x4) {
    ushort2 fp16_packed_a, fp16_packed_b, bias_a, bias_b, sign_a, sign_b;
    // Shift each nibble's exponent+mantissa bits (e e m) to fp16 bits 11..9:
    // the two exponent bits land in the low fp16 exponent field, the single
    // mantissa bit becomes the fp16 mantissa MSB.
    fp16_packed_a.lo = (fp4x4 << 9) & 0x0E00;   // nibble 0
    fp16_packed_a.hi = (fp4x4 << 5) & 0x0E00;   // nibble 1
    fp16_packed_b.lo = (fp4x4 << 1) & 0x0E00;   // nibble 2
    fp16_packed_b.hi = (fp4x4 >> 3) & 0x0E00;   // nibble 3

    // Rebias the exponent: E2M1 uses bias 1, fp16 uses bias 15, so add
    // 14 << 10 = 0x3800 — except for +/-0.0, which must keep a zero
    // exponent/mantissa field.
    bias_a.lo = (fp16_packed_a.lo == 0) ? 0x0 : 0x3800;
    bias_a.hi = (fp16_packed_a.hi == 0) ? 0x0 : 0x3800;
    bias_b.lo = (fp16_packed_b.lo == 0) ? 0x0 : 0x3800;
    bias_b.hi = (fp16_packed_b.hi == 0) ? 0x0 : 0x3800;

    // Special-case the E2M1 subnormal (e == 0, m == 1 -> pattern 0x0200),
    // which encodes +/-0.5: clear the mantissa bit so that the 0x3800 bias
    // added below yields exactly 2^-1 = 0.5 as a normal fp16.
    fp16_packed_a.lo = (fp16_packed_a.lo == 0x0200) ? 0x0 : fp16_packed_a.lo;
    fp16_packed_a.hi = (fp16_packed_a.hi == 0x0200) ? 0x0 : fp16_packed_a.hi;
    fp16_packed_b.lo = (fp16_packed_b.lo == 0x0200) ? 0x0 : fp16_packed_b.lo;
    fp16_packed_b.hi = (fp16_packed_b.hi == 0x0200) ? 0x0 : fp16_packed_b.hi;

    // Move each nibble's sign bit (bit 3 of the nibble) to fp16 bit 15.
    sign_a.lo = (fp4x4 << 12) & 0x8000;
    sign_a.hi = (fp4x4 << 8) & 0x8000;
    sign_b.lo = (fp4x4 << 4) & 0x8000;
    sign_b.hi = fp4x4 & 0x8000;

    // Assemble: sign | (rebased exponent + mantissa).
    fp16_packed_a = sign_a + bias_a + fp16_packed_a;
    fp16_packed_b = sign_b + bias_b + fp16_packed_b;

    return as_half4((ushort4)(fp16_packed_a, fp16_packed_b));
}
|
||||
|
||||
// Decode an E8M0 scale (an 8-bit biased exponent with no mantissa) to fp32
// by placing it directly into the fp32 exponent field, i.e. 2^(x - 127).
// x == 0 encodes 2^-127, which is subnormal in fp32, hence the explicit
// bit pattern 0x00400000 (exponent 0, mantissa MSB set).
static inline float e8m0_to_fp32(uchar x) {
    uint bits = 0x00400000;
    if (x != 0) {
        bits = (uint) x << 23;
    }
    return as_float(bits);
}
|
||||
|
||||
// Per-vendor tuning for the MoE (mul_mv_id) kernel.
#ifdef INTEL_GPU
#define N_R0_MXFP4 2 // number of rows each subgroup works on
#define N_SG_MXFP4 2 // number of subgroups in a work group
#define N_SIMDWIDTH 16 // subgroup size
#elif defined (ADRENO_GPU)
#define N_R0_MXFP4 4 // number of rows each subgroup works on
#define N_SG_MXFP4 1 // number of subgroups in a work group
#define N_SIMDWIDTH 64 // subgroup size
// On Adreno, read the flattened quants through a 1D image buffer rather
// than a plain global pointer (selects the image path in the kernel).
#define SRC0Q_IMG
#endif
|
||||
|
||||
// Matrix-vector multiply for MoE (indirect expert selection) with MXFP4
// weights in "flat" layout: the quants (src0_q) and the per-block E8M0
// scales (src0_e) live in two separate arrays, as produced by
// kernel_convert_block_mxfp4.
//
// NOTE(review): unlike kernel_mul_mv_mxfp4_f32_flat, no
// REQD_SUBGROUP_SIZE_* attribute is applied here even though the macros
// are defined above — confirm this is intentional.
kernel void kernel_mul_mv_id_mxfp4_f32_flat(
#ifdef SRC0Q_IMG
        // Quants exposed as a 1D image buffer; each read below fetches
        // 8 bytes (.xy of a uint4 texel), i.e. half of one block's qs.
        __read_only image1d_buffer_t src0_q,
#else
        global uchar * src0_q,
#endif
        global uchar * src0_e,   // per-block E8M0 scales, one byte per block
        global uchar * src1,     // activations (f32), indexed via nb11/nb12
        ulong offset1,
        global uchar * src2,     // expert ids (u32) selected per token
        ulong offset2,
        global uchar * dst,
        ulong offsetd,
        int ne00,                // row length of src0 in elements
        ulong nb01,              // src0 byte strides (block_mxfp4 layout)
        ulong nb02,
        ulong nb03,
        int ne11,
        int ne12,
        ulong nb11,              // src1 byte strides
        ulong nb12,
        ulong nb13,
        int ne20,                // number of experts used per token
        int ne21,
        ulong nb21,              // byte stride between tokens in src2
        int ne0,
        int ne1,
        int r2,
        int r3
) {
    dst = dst + offsetd;

    // Work-group z dimension enumerates (token, expert-slot) pairs.
    const int iid1 = get_group_id(2) / ne20;
    const int idx = get_group_id(2) % ne20;

    // Expert index chosen for this (token, slot) pair.
    uint i02 = ((global uint *) (src2 + offset2 + iid1 * nb21))[idx];

    int i11 = idx % ne11;

    int nb = ne00 / QK_MXFP4; // blocks per row

    // nb02 is a byte stride over block_mxfp4 structs; divide by
    // sizeof(block_mxfp4) to get a block index into the flat arrays.
    uint src0_off = i02*nb02;
    src0_off /= 17; // 17 = sizeof(block_mxfp4)

    src0_e = src0_e + src0_off;

    dst = dst + (idx * ne0 + iid1 * ne1 * ne0) * sizeof(float);

    int r0 = get_group_id(0);
    int r1 = get_group_id(1);

    // First output row handled by this subgroup.
    int first_row = (r0 * N_SG_MXFP4 + get_sub_group_id()) * N_R0_MXFP4;

    uint offset_src0 = first_row*nb01;
    offset_src0 /= 17; // 17 = sizeof(block_mxfp4)
#ifdef SRC0Q_IMG
    ulong offset_q = src0_off + offset_src0;
#else
    // Each block contributes 16 bytes of packed quants to the flat array.
    src0_q = src0_q + src0_off*16;
    global uchar16 * x_q = (global uchar16 *)(src0_q) + offset_src0;
#endif
    global uchar * x_e = src0_e + offset_src0;

    // Each pair of lanes shares one block: ix selects the block,
    // it selects which half (8 bytes of quants / 16 elements) of it.
    const short ix = get_sub_group_local_id() >> 1;
    const short it = get_sub_group_local_id() & 1;

    float sumf[N_R0_MXFP4] = {0.f};

    src1 = src1 + offset1 + i11 * nb11 + iid1 * nb12;
    global float * y = (global float *) (src1 + r1 * nb11);
    global float * yb = y + ix * QK_MXFP4 + it * 8;

    // Each iteration consumes N_SIMDWIDTH/2 blocks across the subgroup.
    for (int ib = ix; ib < nb; ib += N_SIMDWIDTH / 2) {
        global float4 * y4 = (global float4 *)yb;

#pragma unroll
        for (short row = 0; row < N_R0_MXFP4; row++) {
            uchar xb_e = x_e[row * nb + ib];
#ifdef SRC0Q_IMG
            // Two texels per 16-byte block; `it` picks the half.
            ushort4 xb_q = as_ushort4(read_imageui(src0_q, (offset_q + row * nb + ib) * 2 + it).xy);
#else
            ushort4 xb_q = vload4(0, (global ushort *)((global uchar *)(x_q + row * nb + ib) + 8 * it));
#endif

            // Low nibbles of a block hold elements 0..15, high nibbles hold
            // 16..31, hence the y4[0]/y4[4] (and y4[1]/y4[5]) pairing.
            half4 fp16x4_0 = mxfp4_to_fp16_packed(xb_q.s0);
            half4 fp16x4_1 = mxfp4_to_fp16_packed(xb_q.s1);
            float4 acc1 = y4[0] * (float4)(fp16x4_0.s0, fp16x4_0.s2, fp16x4_1.s0, fp16x4_1.s2);
            acc1 += y4[4] * (float4)(fp16x4_0.s1, fp16x4_0.s3, fp16x4_1.s1, fp16x4_1.s3);

            fp16x4_0 = mxfp4_to_fp16_packed(xb_q.s2);
            fp16x4_1 = mxfp4_to_fp16_packed(xb_q.s3);
            acc1 += y4[1] * (float4)(fp16x4_0.s0, fp16x4_0.s2, fp16x4_1.s0, fp16x4_1.s2);
            acc1 += y4[5] * (float4)(fp16x4_0.s1, fp16x4_0.s3, fp16x4_1.s1, fp16x4_1.s3);

            // Apply the per-block E8M0 scale to the partial dot product.
            sumf[row] += e8m0_to_fp32(xb_e) * ((acc1.s0 + acc1.s1) + (acc1.s2 + acc1.s3));
        }

        yb += (N_SIMDWIDTH / 2) * QK_MXFP4;
    }

    global float * dst_f32 = (global float *)dst + (ulong)r1 * ne0;

    // Reduce partial sums across the subgroup; lane 0 writes the result.
    for (int row = 0; row < N_R0_MXFP4 && first_row + row < ne0; ++row) {
        float sum_all = sub_group_reduce_add(sumf[row]);
        if (get_sub_group_local_id() == 0) {
            dst_f32[first_row + row] = sum_all;
        }
    }
}
|
||||
167
ggml/src/ggml-opencl/kernels/mul_mv_mxfp4_f32_flat.cl
Normal file
167
ggml/src/ggml-opencl/kernels/mul_mv_mxfp4_f32_flat.cl
Normal file
@@ -0,0 +1,167 @@
|
||||
#pragma OPENCL EXTENSION cl_khr_fp16 : enable

// Subgroup support: prefer the Intel extension when available, otherwise
// fall back to the Khronos one.
#ifdef cl_intel_subgroups
#pragma OPENCL EXTENSION cl_intel_subgroups : enable
#else
#pragma OPENCL EXTENSION cl_khr_subgroups : enable
#endif

// Vendor-specific required-subgroup-size attributes. The matching
// INTEL_GPU / ADRENO_GPU flags select the tuning constants below.
#ifdef cl_intel_required_subgroup_size
#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable
#define INTEL_GPU 1
#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16)))
#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32)))
#elif defined(cl_qcom_reqd_sub_group_size)
#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
#define ADRENO_GPU 1
#define REQD_SUBGROUP_SIZE_64  __attribute__((qcom_reqd_sub_group_size("half")))
#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
#endif

// Number of 4-bit elements per MXFP4 block (must match the host side).
#define QK_MXFP4 32
|
||||
|
||||
// Expand four packed FP4 (E2M1) values, stored in the four nibbles of
// `fp4x4` (lowest nibble first), into four IEEE fp16 values — purely with
// bit operations, replacing the previous LUT approach.
//
// FP4 E2M1 nibble layout: [s e e m] (sign, 2-bit exponent, 1-bit mantissa).
// fp16 layout: sign at bit 15, 5-bit exponent (bias 15) at bits 14-10,
// 10-bit mantissa at bits 9-0.
// Result lane i corresponds to input nibble i.
static inline half4 mxfp4_to_fp16_packed(ushort fp4x4) {
    ushort2 fp16_packed_a, fp16_packed_b, bias_a, bias_b, sign_a, sign_b;
    // Shift each nibble's exponent+mantissa bits (e e m) to fp16 bits 11..9:
    // the two exponent bits land in the low fp16 exponent field, the single
    // mantissa bit becomes the fp16 mantissa MSB.
    fp16_packed_a.lo = (fp4x4 << 9) & 0x0E00;   // nibble 0
    fp16_packed_a.hi = (fp4x4 << 5) & 0x0E00;   // nibble 1
    fp16_packed_b.lo = (fp4x4 << 1) & 0x0E00;   // nibble 2
    fp16_packed_b.hi = (fp4x4 >> 3) & 0x0E00;   // nibble 3

    // Rebias the exponent: E2M1 uses bias 1, fp16 uses bias 15, so add
    // 14 << 10 = 0x3800 — except for +/-0.0, which must keep a zero
    // exponent/mantissa field.
    bias_a.lo = (fp16_packed_a.lo == 0) ? 0x0 : 0x3800;
    bias_a.hi = (fp16_packed_a.hi == 0) ? 0x0 : 0x3800;
    bias_b.lo = (fp16_packed_b.lo == 0) ? 0x0 : 0x3800;
    bias_b.hi = (fp16_packed_b.hi == 0) ? 0x0 : 0x3800;

    // Special-case the E2M1 subnormal (e == 0, m == 1 -> pattern 0x0200),
    // which encodes +/-0.5: clear the mantissa bit so that the 0x3800 bias
    // added below yields exactly 2^-1 = 0.5 as a normal fp16.
    fp16_packed_a.lo = (fp16_packed_a.lo == 0x0200) ? 0x0 : fp16_packed_a.lo;
    fp16_packed_a.hi = (fp16_packed_a.hi == 0x0200) ? 0x0 : fp16_packed_a.hi;
    fp16_packed_b.lo = (fp16_packed_b.lo == 0x0200) ? 0x0 : fp16_packed_b.lo;
    fp16_packed_b.hi = (fp16_packed_b.hi == 0x0200) ? 0x0 : fp16_packed_b.hi;

    // Move each nibble's sign bit (bit 3 of the nibble) to fp16 bit 15.
    sign_a.lo = (fp4x4 << 12) & 0x8000;
    sign_a.hi = (fp4x4 << 8) & 0x8000;
    sign_b.lo = (fp4x4 << 4) & 0x8000;
    sign_b.hi = fp4x4 & 0x8000;

    // Assemble: sign | (rebased exponent + mantissa).
    fp16_packed_a = sign_a + bias_a + fp16_packed_a;
    fp16_packed_b = sign_b + bias_b + fp16_packed_b;

    return as_half4((ushort4)(fp16_packed_a, fp16_packed_b));
}
|
||||
|
||||
// Decode an E8M0 scale (an 8-bit biased exponent with no mantissa) to fp32
// by placing it directly into the fp32 exponent field, i.e. 2^(x - 127).
// x == 0 encodes 2^-127, which is subnormal in fp32, hence the explicit
// bit pattern 0x00400000 (exponent 0, mantissa MSB set).
static inline float e8m0_to_fp32(uchar x) {
    uint bits = 0x00400000;
    if (x != 0) {
        bits = (uint) x << 23;
    }
    return as_float(bits);
}
|
||||
|
||||
// Per-vendor tuning for the plain mul_mv kernel.
#ifdef INTEL_GPU
#define N_R0_MXFP4 2 // number of rows each subgroup works on
#define N_SG_MXFP4 2 // number of subgroups in a work group
#define N_SIMDWIDTH 16 // subgroup size
#elif defined (ADRENO_GPU)
#define N_R0_MXFP4 2 // number of rows each subgroup works on
#define N_SG_MXFP4 2 // number of subgroups in a work group
#define N_SIMDWIDTH 64 // subgroup size
// On Adreno, read the flattened quants through a 1D image buffer rather
// than a plain global pointer (selects the image path in the kernel).
#define SRC0Q_IMG
#endif
|
||||
|
||||
// Matrix-vector multiply with MXFP4 weights in "flat" layout: the quants
// (src0_q) and the per-block E8M0 scales (src0_e) live in two separate
// arrays, as produced by kernel_convert_block_mxfp4.
#ifdef INTEL_GPU
REQD_SUBGROUP_SIZE_16
#elif defined (ADRENO_GPU)
REQD_SUBGROUP_SIZE_64
#endif
kernel void kernel_mul_mv_mxfp4_f32_flat(
#ifdef SRC0Q_IMG
        // Quants exposed as a 1D image buffer; each read below fetches
        // 8 bytes (.xy of a uint4 texel), i.e. half of one block's qs.
        __read_only image1d_buffer_t src0_q,
#else
        global uchar * src0_q,
#endif
        global uchar * src0_e,   // per-block E8M0 scales, one byte per block
        global uchar * src1,     // activations (f32), indexed via nb11..nb13
        ulong offset1,
        global uchar * dst,
        ulong offsetd,
        int ne00,                // row length of src0 in elements
        ulong nb01,              // src0 byte strides (block_mxfp4 layout)
        ulong nb02,
        ulong nb03,
        int ne12,
        ulong nb11,              // src1 byte strides
        ulong nb12,
        ulong nb13,
        int ne0,
        int ne1,
        int r2,                  // broadcast ratios for dims 2 and 3
        int r3
) {
    src1 = src1 + offset1;
    dst = dst + offsetd;

    int nb = ne00 / QK_MXFP4; // blocks per row

    int r0 = get_group_id(0);
    int r1 = get_group_id(1);
    int im = get_group_id(2);

    // First output row handled by this subgroup.
    int first_row = (r0 * N_SG_MXFP4 + get_sub_group_id()) * N_R0_MXFP4;

    uint i12 = im % ne12;
    uint i13 = im / ne12;

    // offset_src0 is a byte offset over block_mxfp4 structs; dividing by
    // sizeof(block_mxfp4) turns it into a block index into the flat arrays.
    uint offset_src0 = first_row*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03;
    // 17 = sizeof(block_mxfp4)
    offset_src0 /= 17;
#ifdef SRC0Q_IMG
    ulong offset_q = offset_src0;
#else
    // Each block contributes 16 bytes of packed quants to the flat array.
    global uchar16 * x_q = (global uchar16 *)(src0_q) + offset_src0;
#endif
    global uchar * x_e = src0_e + offset_src0;

    ulong offset_src1 = r1 * nb11 + i12 * nb12 + i13 * nb13;
    global float * y = (global float *)(src1 + offset_src1);

    // Each pair of lanes shares one block: ix selects the block,
    // it selects which half (8 bytes of quants / 16 elements) of it.
    const short ix = get_sub_group_local_id() >> 1; // 0...15
    const short it = get_sub_group_local_id() & 1; // 0 or 1

    float sumf[N_R0_MXFP4] = {0.f};

    global float * yb = y + ix * QK_MXFP4 + it * 8;

    // Each iteration consumes N_SIMDWIDTH/2 blocks across the subgroup.
    for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) {
        global float4 * y4 = (global float4 *)yb;

#pragma unroll
        for (short row = 0; row < N_R0_MXFP4; row++) {
            uchar xb_e = x_e[row * nb + ib];
#ifdef SRC0Q_IMG
            // Two texels per 16-byte block; `it` picks the half.
            ushort4 xb_q = as_ushort4(read_imageui(src0_q, (offset_q + row * nb + ib) * 2 + it).xy);
#else
            ushort4 xb_q = vload4(0, (global ushort *)((global uchar *)(x_q + row * nb + ib) + 8 * it));
#endif

            // Low nibbles of a block hold elements 0..15, high nibbles hold
            // 16..31, hence the y4[0]/y4[4] (and y4[1]/y4[5]) pairing.
            half4 fp16x4_0 = mxfp4_to_fp16_packed(xb_q.s0);
            half4 fp16x4_1 = mxfp4_to_fp16_packed(xb_q.s1);
            float4 acc1 = y4[0] * (float4)(fp16x4_0.s0, fp16x4_0.s2, fp16x4_1.s0, fp16x4_1.s2);
            acc1 += y4[4] * (float4)(fp16x4_0.s1, fp16x4_0.s3, fp16x4_1.s1, fp16x4_1.s3);

            fp16x4_0 = mxfp4_to_fp16_packed(xb_q.s2);
            fp16x4_1 = mxfp4_to_fp16_packed(xb_q.s3);
            acc1 += y4[1] * (float4)(fp16x4_0.s0, fp16x4_0.s2, fp16x4_1.s0, fp16x4_1.s2);
            acc1 += y4[5] * (float4)(fp16x4_0.s1, fp16x4_0.s3, fp16x4_1.s1, fp16x4_1.s3);

            // Apply the per-block E8M0 scale to the partial dot product.
            sumf[row] += e8m0_to_fp32(xb_e) * ((acc1.s0 + acc1.s1) + (acc1.s2 + acc1.s3));
        }

        yb += (N_SIMDWIDTH/2) * QK_MXFP4;
    }

    global float * dst_f32 = (global float *) dst + (ulong)im*ne0*ne1 + (ulong)r1*ne0;

    // Reduce partial sums across the subgroup; lane 0 writes the result.
    for (int row = 0; row < N_R0_MXFP4 && first_row + row < ne0; ++row) {
        float sum_all = sub_group_reduce_add(sumf[row]);
        if (get_sub_group_local_id() == 0) {
            dst_f32[first_row + row] = sum_all;
        }
    }
}
|
||||
Reference in New Issue
Block a user