From 77771b2711cb8fb7af9fdc7b6fb98a04cd55f3e0 Mon Sep 17 00:00:00 2001 From: lhez Date: Sat, 16 Aug 2025 00:52:14 +0800 Subject: [PATCH] opencl: add initial mxfp4 support via mv (llama/15270) * opencl: add reference `mul_mv_mxfp4_f32` * opencl: add reference `mul_mv_id` for mxfp4 * Q4_0 transpose fix for Adreno --------- Co-authored-by: shawngu-quic --- ggml/src/ggml-opencl/CMakeLists.txt | 2 + ggml/src/ggml-opencl/ggml-opencl.cpp | 147 +++++++++++++- .../kernels/mul_mv_id_mxfp4_f32.cl | 189 ++++++++++++++++++ .../ggml-opencl/kernels/mul_mv_mxfp4_f32.cl | 144 +++++++++++++ ggml/src/ggml-opencl/kernels/transpose.cl | 20 ++ 5 files changed, 496 insertions(+), 6 deletions(-) create mode 100644 ggml/src/ggml-opencl/kernels/mul_mv_id_mxfp4_f32.cl create mode 100644 ggml/src/ggml-opencl/kernels/mul_mv_mxfp4_f32.cl diff --git a/ggml/src/ggml-opencl/CMakeLists.txt b/ggml/src/ggml-opencl/CMakeLists.txt index d8290faa..015821f3 100644 --- a/ggml/src/ggml-opencl/CMakeLists.txt +++ b/ggml/src/ggml-opencl/CMakeLists.txt @@ -82,7 +82,9 @@ set(GGML_OPENCL_KERNELS mul_mv_q4_0_f32_1d_8x_flat mul_mv_q4_0_f32_1d_16x_flat mul_mv_q6_k + mul_mv_mxfp4_f32 mul_mv_id_q4_0_f32_8x_flat + mul_mv_id_mxfp4_f32 mul_mm_f32_f32_l4_lm mul_mm_f16_f32_l4_lm mul diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp index fc838684..b8cbf6cf 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp @@ -365,6 +365,7 @@ struct ggml_backend_opencl_context { cl_program program_mul_mv_q4_0_f32_1d_8x_flat; cl_program program_mul_mv_q4_0_f32_1d_16x_flat; cl_program program_mul_mv_q6_K; + cl_program program_mul_mv_mxfp4_f32; cl_program program_mul_mv_f16_f16; cl_program program_mul_mv_f16_f32_1row; cl_program program_mul_mv_f16_f32_l4; @@ -398,6 +399,7 @@ struct ggml_backend_opencl_context { cl_program program_conv_2d_f16_f32; cl_program program_tsembd; cl_program program_mul_mv_id_q4_0_f32_8x_flat; + cl_program program_mul_mv_id_mxfp4_f32; cl_program program_mul_mm_f32_f32_l4_lm; cl_program program_mul_mm_f16_f32_l4_lm; @@ -439,6 +441,7 @@ struct ggml_backend_opencl_context { cl_kernel kernel_convert_block_q4_0_noshuffle; cl_kernel kernel_mul_mat_q4_0_f32_1d_8x_flat, kernel_mul_mat_q4_0_f32_1d_16x_flat; cl_kernel kernel_mul_mv_q6_K_f32; + cl_kernel kernel_mul_mv_mxfp4_f32; cl_kernel kernel_im2col_f32, kernel_im2col_f16; cl_kernel kernel_argsort_f32_i32; cl_kernel kernel_sum_rows_f32; @@ -455,6 +458,7 @@ struct ggml_backend_opencl_context { cl_kernel kernel_conv_2d_f16_f32; cl_kernel kernel_timestep_embedding; cl_kernel kernel_mul_mv_id_q4_0_f32_8x_flat; + cl_kernel kernel_mul_mv_id_mxfp4_f32; cl_kernel kernel_mul_mm_f32_f32_l4_lm; cl_kernel kernel_mul_mm_f16_f32_l4_lm; @@ -577,6 +581,7 @@ struct ggml_backend_opencl_context { cl_kernel kernel_transpose_32; cl_kernel kernel_transpose_32_16; cl_kernel kernel_transpose_16; + cl_kernel kernel_transpose_16_4x1; cl_mem A_s_d_max; // max scale buffer size for transpose cl_mem A_q_d_max; // max weight buffer size for transpose @@ -971,6 +976,22 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve GGML_LOG_CONT("."); } + // mul_mv_mxfp4_f32 + { +#ifdef GGML_OPENCL_EMBED_KERNELS + const std::string kernel_src { + #include "mul_mv_mxfp4_f32.cl.h" + }; +#else + const std::string kernel_src = read_file("mul_mv_mxfp4_f32.cl"); +#endif + backend_ctx->program_mul_mv_mxfp4_f32 = + build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); +
CL_CHECK((backend_ctx->kernel_mul_mv_mxfp4_f32 = clCreateKernel(backend_ctx->program_mul_mv_mxfp4_f32, "kernel_mul_mv_mxfp4_f32", &err), err)); + GGML_LOG_CONT("."); + } + // mul_mv_f16_f16 { #ifdef GGML_OPENCL_EMBED_KERNELS @@ -1611,6 +1632,22 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve GGML_LOG_CONT("."); } + // mul_mv_id_mxfp4_f32 + { +#ifdef GGML_OPENCL_EMBED_KERNELS + const std::string kernel_src { + #include "mul_mv_id_mxfp4_f32.cl.h" + }; +#else + const std::string kernel_src = read_file("mul_mv_id_mxfp4_f32.cl"); +#endif + backend_ctx->program_mul_mv_id_mxfp4_f32 = + build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); + + CL_CHECK((backend_ctx->kernel_mul_mv_id_mxfp4_f32 = clCreateKernel(backend_ctx->program_mul_mv_id_mxfp4_f32, "kernel_mul_mv_id_mxfp4_f32", &err), err)); + GGML_LOG_CONT("."); + } + // Adreno kernels #ifdef GGML_OPENCL_USE_ADRENO_KERNELS // transpose @@ -1628,6 +1665,7 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve CL_CHECK((backend_ctx->kernel_transpose_32_16 = clCreateKernel(backend_ctx->program_transpose, "kernel_transpose_32_16", &err), err)); CL_CHECK((backend_ctx->kernel_transpose_32 = clCreateKernel(backend_ctx->program_transpose, "kernel_transpose_32", &err), err)); CL_CHECK((backend_ctx->kernel_transpose_16 = clCreateKernel(backend_ctx->program_transpose, "kernel_transpose_16", &err), err)); + CL_CHECK((backend_ctx->kernel_transpose_16_4x1 = clCreateKernel(backend_ctx->program_transpose, "kernel_transpose_16_4x1", &err), err)); GGML_LOG_CONT("."); } @@ -2552,13 +2590,14 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te return true; } else if (op->src[0]->type == GGML_TYPE_F32) { return op->src[1]->type == GGML_TYPE_F32; - } else if (op->src[0]->type == GGML_TYPE_Q4_0 || + } else if (op->src[0]->type == GGML_TYPE_Q4_0 || op->src[0]->type == GGML_TYPE_MXFP4 || op->src[0]->type == GGML_TYPE_Q6_K) { return op->src[1]->type == GGML_TYPE_F32 && ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]); } return false; case GGML_OP_MUL_MAT_ID: - if (op->src[0]->type == GGML_TYPE_Q4_0) { + if (op->src[0]->type == GGML_TYPE_Q4_0 || + op->src[0]->type == GGML_TYPE_MXFP4) { if (op->src[1]->type == GGML_TYPE_F32) { return ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]); } @@ -2944,7 +2983,10 @@ static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer, // cl_mem qT_d = clCreateBuffer(context, CL_MEM_READ_WRITE, q_size_bytes, NULL, &err); CL_CHECK(err); - // size_t d_size_bytes = M * (K / 32) / 2 * sizeof(float); + bool K_tile_trans = true; + if ((K / 32) % 4 != 0){ + K_tile_trans =false; + } size_t d_size_bytes = M * (K / 32) * 2; region.origin = 0; region.size = d_size_bytes; @@ -2985,10 +3027,15 @@ static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer, qT_d_image1D = clCreateImage(context, 0, &img_fmt_1d, &img_desc_1d, NULL, &err); CL_CHECK(err); - img_fmt_1d = { CL_RGBA, CL_HALF_FLOAT }; memset(&img_desc_1d, 0, sizeof(img_desc_1d)); + if (K_tile_trans) { + img_fmt_1d = { CL_RGBA, CL_HALF_FLOAT }; + img_desc_1d.image_width = M * K / 32 / 4; + } else { + img_fmt_1d = { CL_R, CL_HALF_FLOAT }; + img_desc_1d.image_width = M * K / 32; + } img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER; - img_desc_1d.image_width = M * K / 32 / 4; img_desc_1d.buffer = extra->d; d_d_image1D = clCreateImage(context, 0, &img_fmt_1d, &img_desc_1d, NULL, 
&err); CL_CHECK(err); @@ -3024,6 +3071,10 @@ static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer, int width_s = K / 32 / 4; kernel = backend_ctx->kernel_transpose_16; + if (!K_tile_trans) { + kernel = backend_ctx->kernel_transpose_16_4x1; + width_s = K / 32; + } CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &d_d_image1D)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &dT_d_image1D)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(int), &height_s)); @@ -6254,11 +6305,47 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &r2)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &r3)); break; + case GGML_TYPE_MXFP4: { + kernel = backend_ctx->kernel_mul_mv_mxfp4_f32; + + if (backend_ctx->gpu_family == INTEL) { + nth0 = 16; + nth1 = 2; + ndst = nth1*2; + } else if (backend_ctx->gpu_family == ADRENO) { + nth0 = 64; + nth1 = 2; + ndst = nth1*2; + } else { + GGML_ASSERT(false && "TODO: Unknown GPU"); + } + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb01)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb02)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb03)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb11)); + CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb12)); + CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb13)); + CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne0)); + CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne1)); + CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &r2)); + CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r3)); + CL_CHECK(clSetKernelArg(kernel, 18, sizeof(float)*nth0,nullptr)); + break; + } default: GGML_ASSERT(false && "not implemented"); } - if (src0t == GGML_TYPE_Q4_0 || + if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_MXFP4 || src0t == GGML_TYPE_Q4_1 || src0t == GGML_TYPE_Q8_0 || src0t == GGML_TYPE_Q2_K) { @@ -6307,10 +6394,12 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0, ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; ggml_tensor_extra_cl * extra2 = (ggml_tensor_extra_cl *)src2->extra; ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; + cl_ulong offset0 = extra0->offset + src0->view_offs; cl_ulong offset1 = extra1->offset + src1->view_offs; cl_ulong offset2 = extra2->offset + src2->view_offs; cl_ulong offsetd = extrad->offset + dst->view_offs; @@ -6325,7 +6414,9 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0, const int ne03 = src0->ne[3]; const cl_ulong nb00 = src0->nb[0]; + const cl_ulong nb01 = src0->nb[1]; const cl_ulong nb02 = src0->nb[2]; + const cl_ulong nb03 = src0->nb[3]; const int ne10 = src1->ne[0]; const int ne11 = src1->ne[1]; @@ -6334,6 +6425,7 @@ 
static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0, const cl_ulong nb11 = src1->nb[1]; const cl_ulong nb12 = src1->nb[2]; + const cl_ulong nb13 = src1->nb[3]; const int ne20 = src2->ne[0]; const int ne21 = src2->ne[1]; @@ -6401,6 +6493,49 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0, break; } + case GGML_TYPE_MXFP4: { + kernel = backend_ctx->kernel_mul_mv_id_mxfp4_f32; + + if (backend_ctx->gpu_family == INTEL) { + sgs = 16; + nsg = 2; + ndst = 2; + } else if (backend_ctx->gpu_family == ADRENO) { + sgs = 64; + nsg = 2; + ndst = 2; + } else { + GGML_ASSERT(false && "TODO: Unknown GPU"); + } + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb01)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb02)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb03)); + CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne11)); + CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne12)); + CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb11)); + CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb12)); + CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb13)); + CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne20)); + CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &ne21)); + CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb21)); + CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne0)); + CL_CHECK(clSetKernelArg(kernel, 21, sizeof(int), &ne1)); + CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &r2)); + CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &r3)); + CL_CHECK(clSetKernelArg(kernel, 24, sizeof(float)*sgs,nullptr)); + + break; + } default: GGML_ASSERT(false && "not implemented");; } diff --git a/ggml/src/ggml-opencl/kernels/mul_mv_id_mxfp4_f32.cl b/ggml/src/ggml-opencl/kernels/mul_mv_id_mxfp4_f32.cl new file mode 100644 index 00000000..d50bd1fc --- /dev/null +++ b/ggml/src/ggml-opencl/kernels/mul_mv_id_mxfp4_f32.cl @@ -0,0 +1,189 @@ +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +#ifdef cl_intel_subgroups +#pragma OPENCL EXTENSION cl_intel_subgroups : enable +#else +#pragma OPENCL EXTENSION cl_khr_subgroups : enable +#endif + +#ifdef cl_intel_required_subgroup_size +#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable +#define INTEL_GPU 1 +#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) +#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) +#elif defined(cl_qcom_reqd_sub_group_size) +#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable +#define ADRENO_GPU 1 +#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) +#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) +#endif + +#define QK_MXFP4 32 +typedef struct { + uchar e; // E8M0 + uchar qs[QK_MXFP4/2]; +} block_mxfp4; + +constant static float kvalues_mxfp4_f[16] = { + 0, 
.5f, 1.f, 1.5f, 2.f, 3.f, 4.f, 6.f, -0, -.5f, -1.f, -1.5f, -2.f, -3.f, -4.f, -6.f +}; + +static inline float e8m0_to_fp32(uchar x) { + int bits; + + if (x == 0) { + bits = 0x00400000; + } else { + bits = (uint) x << 23; + } + + return as_float(bits); +} + +#ifdef INTEL_GPU +#define N_R0_MXFP4 2 // number of rows each subgroup works on +#define N_SG_MXFP4 2 // number of subgroups in a work group +#define N_SIMDWIDTH 16 // subgroup size +#elif defined (ADRENO_GPU) +#define N_R0_MXFP4 2 +#define N_SG_MXFP4 2 +#define N_SIMDWIDTH 64 +#endif + +inline void mul_mv_mxfp4_f32( + global char * src0, + global char * src1, + global char * dst, + int ne00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne12, + ulong nb11, + ulong nb12, + ulong nb13, + int ne0, + int ne1, + int r2, + int r3, + local char * shmem +) { + local float * shmem_f32 = (local float *) shmem; + int nb = ne00/QK_MXFP4; + + int r0 = get_group_id(0); + int r1 = get_group_id(1); + int im = 0; + + int first_row = (r0 * N_SG_MXFP4 + get_sub_group_id()) * N_R0_MXFP4; + + uint i12 = im%ne12; + uint i13 = im/ne12; + + ulong offset_src0 = first_row*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; + ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; + + global block_mxfp4 * x = (global block_mxfp4 *) (src0 + offset_src0); + global float * y = (global float *) (src1 + offset_src1); + + const short ix = get_sub_group_local_id()/2; // 0...15 + const short it = get_sub_group_local_id()%2; // 0 or 1 + + shmem_f32[get_sub_group_local_id()] = kvalues_mxfp4_f[get_sub_group_local_id()%16]; + barrier(CLK_LOCAL_MEM_FENCE); + + float4 yl[4]; + float sumf[N_R0_MXFP4] = {0.f}; + + global float * yb = y + ix * QK_MXFP4 + it * 8; + + for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) { + global float4 * y4 = (global float4 *)yb; + yl[0] = y4[0]; + yl[1] = y4[4]; + yl[2] = y4[1]; + yl[3] = y4[5]; + + for (short row = 0; row < N_R0_MXFP4; row++) { + global block_mxfp4 * xb = x + row*nb + ib; + global uchar * q2 = (global uchar *)(xb->qs + 8*it); + + float4 acc1 = yl[0]*(float4)(shmem_f32[q2[0] & 0x0F], shmem_f32[q2[1] & 0x0F], shmem_f32[q2[2] & 0x0F], shmem_f32[q2[3] & 0x0F]); + float4 acc2 = yl[1]*(float4)(shmem_f32[q2[0] >> 4 ], shmem_f32[q2[1] >> 4 ], shmem_f32[q2[2] >> 4 ], shmem_f32[q2[3] >> 4 ]); + float4 acc3 = yl[2]*(float4)(shmem_f32[q2[4] & 0x0F], shmem_f32[q2[5] & 0x0F], shmem_f32[q2[6] & 0x0F], shmem_f32[q2[7] & 0x0F]); + float4 acc4 = yl[3]*(float4)(shmem_f32[q2[4] >> 4 ], shmem_f32[q2[5] >> 4 ], shmem_f32[q2[6] >> 4 ], shmem_f32[q2[7] >> 4 ]); + + acc1 = (acc1 + acc3) + (acc2 + acc4); + + sumf[row] += e8m0_to_fp32(xb->e) * ((acc1.s0 + acc1.s1) + (acc1.s2 + acc1.s3)); + } + + yb += (N_SIMDWIDTH/2) * QK_MXFP4; + } + + global float * dst_f32 = (global float *) dst + (ulong)im*ne0*ne1 + (ulong)r1*ne0; + + for (int row = 0; row < N_R0_MXFP4 && first_row + row < ne0; ++row) { + float sum_all = sub_group_reduce_add(sumf[row]); + if (get_sub_group_local_id() == 0) { + dst_f32[first_row + row] = sum_all; + } + } +} + +#ifdef INTEL_GPU +REQD_SUBGROUP_SIZE_16 +#elif defined (ADRENO_GPU) +REQD_SUBGROUP_SIZE_64 +#endif +kernel void kernel_mul_mv_id_mxfp4_f32( + global char * src0, + ulong offset0, + global char * src1, + ulong offset1, + global char * src2, + ulong offset2, + global char * dst, + ulong offsetd, + int ne00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne11, + int ne12, + ulong nb11, + ulong nb12, + ulong nb13, + int ne20, + int ne21, + ulong nb21, + int ne0, + int ne1, + int r2, + int r3, + local char * shmem +) { + src0 = (global char 
*)((global char *)src0 + offset0); + src1 = (global char *)((global char *)src1 + offset1); + src2 = (global char *)((global char *)src2 + offset2); + dst = (global char *)((global char *)dst + offsetd); + + const int iid1 = get_group_id(2)/ne20; + const int idx = get_group_id(2)%ne20; + + int i02 = ((global int *) (src2 + iid1*nb21))[idx]; + + int i11 = idx % ne11; + int i12 = iid1; + + int i1 = idx; + int i2 = i12; + + global char * src0_cur = src0 + i02*nb02; + global char * src1_cur = src1 + i11*nb11 + i12*nb12; + + global char * dst_cur = dst + (i1*ne0 + i2*ne1*ne0)*sizeof(float); + + mul_mv_mxfp4_f32(src0_cur, src1_cur, dst_cur, + ne00, nb01, nb02, nb03, ne12, nb11, nb12, nb13, ne0, ne1, r2, r3, shmem); +} diff --git a/ggml/src/ggml-opencl/kernels/mul_mv_mxfp4_f32.cl b/ggml/src/ggml-opencl/kernels/mul_mv_mxfp4_f32.cl new file mode 100644 index 00000000..9a4d4b9b --- /dev/null +++ b/ggml/src/ggml-opencl/kernels/mul_mv_mxfp4_f32.cl @@ -0,0 +1,144 @@ +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +#ifdef cl_intel_subgroups +#pragma OPENCL EXTENSION cl_intel_subgroups : enable +#else +#pragma OPENCL EXTENSION cl_khr_subgroups : enable +#endif + +#ifdef cl_intel_required_subgroup_size +#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable +#define INTEL_GPU 1 +#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16))) +#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32))) +#elif defined(cl_qcom_reqd_sub_group_size) +#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable +#define ADRENO_GPU 1 +#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half"))) +#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full"))) +#endif + +#define QK_MXFP4 32 +typedef struct { + uchar e; // E8M0 + uchar qs[QK_MXFP4/2]; +} block_mxfp4; + +constant static float kvalues_mxfp4_f[16] = { + 0, .5f, 1.f, 1.5f, 2.f, 3.f, 4.f, 6.f, -0, -.5f, -1.f, -1.5f, -2.f, -3.f, -4.f, -6.f +}; + +static inline float e8m0_to_fp32(uchar x) { + int bits; + + if (x == 0) { + bits = 0x00400000; + } else { + bits = (uint) x << 23; + } + + return as_float(bits); +} + +#ifdef INTEL_GPU +#define N_R0_MXFP4 2 // number of rows each subgroup works on +#define N_SG_MXFP4 2 // number of subgroups in a work group +#define N_SIMDWIDTH 16 // subgroup size +#elif defined (ADRENO_GPU) +#define N_R0_MXFP4 2 +#define N_SG_MXFP4 2 +#define N_SIMDWIDTH 64 +#endif + +#ifdef INTEL_GPU +REQD_SUBGROUP_SIZE_16 +#elif defined (ADRENO_GPU) +REQD_SUBGROUP_SIZE_64 +#endif +kernel void kernel_mul_mv_mxfp4_f32( + global char * src0, + ulong offset0, + global char * src1, + ulong offset1, + global char * dst, + ulong offsetd, + int ne00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne12, + ulong nb11, + ulong nb12, + ulong nb13, + int ne0, + int ne1, + int r2, + int r3, + local char * shmem +) { + src0 = (global char*)((global char*)src0 + offset0); + src1 = (global char*)((global char*)src1 + offset1); + dst = (global char*)((global char*)dst + offsetd); + + local float * shmem_f32 = (local float *) shmem; + int nb = ne00/QK_MXFP4; + + int r0 = get_group_id(0); + int r1 = get_group_id(1); + int im = get_group_id(2); + + int first_row = (r0 * N_SG_MXFP4 + get_sub_group_id()) * N_R0_MXFP4; + + uint i12 = im%ne12; + uint i13 = im/ne12; + + ulong offset_src0 = first_row*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; + ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; + + global block_mxfp4 * x = (global block_mxfp4 *) (src0 + offset_src0); + global float * y = 
(global float *) (src1 + offset_src1); + + const short ix = get_sub_group_local_id()/2; // 0...15 + const short it = get_sub_group_local_id()%2; // 0 or 1 + + shmem_f32[get_sub_group_local_id()] = kvalues_mxfp4_f[get_sub_group_local_id()%16]; + barrier(CLK_LOCAL_MEM_FENCE); + + float4 yl[4]; + float sumf[N_R0_MXFP4] = {0.f}; + + global float * yb = y + ix * QK_MXFP4 + it * 8; + + for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) { + global float4 * y4 = (global float4 *)yb; + yl[0] = y4[0]; + yl[1] = y4[4]; + yl[2] = y4[1]; + yl[3] = y4[5]; + + for (short row = 0; row < N_R0_MXFP4; row++) { + global block_mxfp4 * xb = x + row*nb + ib; + global uchar * q2 = (global uchar *)(xb->qs + 8*it); + + float4 acc1 = yl[0]*(float4)(shmem_f32[q2[0] & 0x0F], shmem_f32[q2[1] & 0x0F], shmem_f32[q2[2] & 0x0F], shmem_f32[q2[3] & 0x0F]); + float4 acc2 = yl[1]*(float4)(shmem_f32[q2[0] >> 4 ], shmem_f32[q2[1] >> 4 ], shmem_f32[q2[2] >> 4 ], shmem_f32[q2[3] >> 4 ]); + float4 acc3 = yl[2]*(float4)(shmem_f32[q2[4] & 0x0F], shmem_f32[q2[5] & 0x0F], shmem_f32[q2[6] & 0x0F], shmem_f32[q2[7] & 0x0F]); + float4 acc4 = yl[3]*(float4)(shmem_f32[q2[4] >> 4 ], shmem_f32[q2[5] >> 4 ], shmem_f32[q2[6] >> 4 ], shmem_f32[q2[7] >> 4 ]); + + acc1 = (acc1 + acc3) + (acc2 + acc4); + + sumf[row] += e8m0_to_fp32(xb->e) * ((acc1.s0 + acc1.s1) + (acc1.s2 + acc1.s3)); + } + + yb += (N_SIMDWIDTH/2) * QK_MXFP4; + } + + global float * dst_f32 = (global float *) dst + (ulong)im*ne0*ne1 + (ulong)r1*ne0; + + for (int row = 0; row < N_R0_MXFP4 && first_row + row < ne0; ++row) { + float sum_all = sub_group_reduce_add(sumf[row]); + if (get_sub_group_local_id() == 0) { + dst_f32[first_row + row] = sum_all; + } + } +} diff --git a/ggml/src/ggml-opencl/kernels/transpose.cl b/ggml/src/ggml-opencl/kernels/transpose.cl index a11490b3..536dd560 100644 --- a/ggml/src/ggml-opencl/kernels/transpose.cl +++ b/ggml/src/ggml-opencl/kernels/transpose.cl @@ -24,6 +24,26 @@ kernel void kernel_transpose_16( write_imageh(output, (i_2+3)*rows+j, (half4)(temp0.s3, temp1.s3, temp2.s3, temp3.s3)); } +// Padded kernel for irregular shape +kernel void kernel_transpose_16_4x1( + __read_only image1d_buffer_t input, + __write_only image1d_buffer_t output, + const uint rows, + const uint cols +) { + + const int i = get_global_id(0); + const int j = get_global_id(1); + const int j_2 = j << 2; + + half temp0 = read_imageh(input, (j_2 + 0) * cols + i).x; + half temp1 = read_imageh(input, (j_2 + 1) * cols + i).x; + half temp2 = read_imageh(input, (j_2 + 2) * cols + i).x; + half temp3 = read_imageh(input, (j_2 + 3) * cols + i).x; + + write_imageh(output, i * rows + j, (half4)(temp0, temp1, temp2, temp3)); +} + // 32-bit transpose, loading/storing a 4x4 tile of elements kernel void kernel_transpose_32( __read_only image1d_buffer_t input,
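
Note on the MXFP4 block format the new kernels operate on: each block_mxfp4 packs 32 weights as one shared E8M0 scale byte plus 16 bytes of 4-bit indices into the kvalues_mxfp4_f table, where the low nibble of qs[k] holds element k and the high nibble holds element k+16 (the same split the kernel's acc1/acc2 pairs read). The host-side C++ sketch below is not part of the patch; it is a minimal reference, assuming that layout and the same e8m0_to_fp32 scale decoding, and the helper names dequant_block_mxfp4, dot_block_mxfp4 and e8m0_to_fp32_ref are illustrative only.

// Host-side reference sketch (illustrative, not part of the patch):
// dequantize one block_mxfp4 and accumulate a dot product the way
// kernel_mul_mv_mxfp4_f32 does per block.
#include <cstdint>
#include <cstring>
#include <cstdio>

#define QK_MXFP4 32

struct block_mxfp4 {
    uint8_t e;                 // E8M0 shared scale
    uint8_t qs[QK_MXFP4 / 2];  // two 4-bit indices per byte
};

// Same lookup table as kvalues_mxfp4_f in the kernels.
static const float kvalues_mxfp4[16] = {
     0.f,  .5f,  1.f,  1.5f,  2.f,  3.f,  4.f,  6.f,
    -0.f, -.5f, -1.f, -1.5f, -2.f, -3.f, -4.f, -6.f
};

// E8M0 -> float: exponent-only encoding, 2^(e-127); e == 0 maps to the
// denormal bit pattern 0x00400000, mirroring e8m0_to_fp32 in the kernels.
static float e8m0_to_fp32_ref(uint8_t x) {
    uint32_t bits = (x == 0) ? 0x00400000u : (uint32_t) x << 23;
    float f;
    std::memcpy(&f, &bits, sizeof(f));
    return f;
}

// out[k]    = scale * lut[low  nibble of qs[k]]
// out[k+16] = scale * lut[high nibble of qs[k]]
static void dequant_block_mxfp4(const block_mxfp4 * b, float * out) {
    const float d = e8m0_to_fp32_ref(b->e);
    for (int k = 0; k < QK_MXFP4 / 2; ++k) {
        out[k]      = d * kvalues_mxfp4[b->qs[k] & 0x0F];
        out[k + 16] = d * kvalues_mxfp4[b->qs[k] >> 4];
    }
}

// Reference dot product of one quantized block against 32 floats.
static float dot_block_mxfp4(const block_mxfp4 * b, const float * y) {
    float tmp[QK_MXFP4];
    dequant_block_mxfp4(b, tmp);
    float sum = 0.f;
    for (int k = 0; k < QK_MXFP4; ++k) {
        sum += tmp[k] * y[k];
    }
    return sum;
}

int main() {
    block_mxfp4 b = {};
    b.e = 127;                  // scale = 2^(127-127) = 1.0
    for (int k = 0; k < QK_MXFP4 / 2; ++k) {
        b.qs[k] = 0x72;         // low nibble 2 -> 1.0, high nibble 7 -> 6.0
    }
    float y[QK_MXFP4];
    for (int k = 0; k < QK_MXFP4; ++k) {
        y[k] = 1.f;
    }
    std::printf("%.1f\n", dot_block_mxfp4(&b, y));  // 16*1.0 + 16*6.0 = 112.0
    return 0;
}

For example, with e = 127 (scale 1.0) and every qs byte set to 0x72, the block decodes to sixteen 1.0 values followed by sixteen 6.0 values, so the dot product against an all-ones vector is 112.0; the OpenCL kernels above accumulate the same quantity per block, just vectorized across a subgroup with the lookup table staged in local memory.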