	metal : rework mat-mat multiplication
@@ -8145,17 +8145,24 @@ kernel void kernel_mul_mm(
     threadgroup S0 * sa = (threadgroup S0 *)(shmem);
     threadgroup S1 * sb = (threadgroup S1 *)(shmem + 4096);
 
-    const int r0 = tgpig.y;
-    const int r1 = tgpig.x;
+    constexpr int NR0 = 64;
+    constexpr int NR1 = 32;
 
+    constexpr int NK  = 32;
+    constexpr int NL0 = NK/16;
+    constexpr int NL1 = NK/8;
+
     const int im = tgpig.z;
+    const int r0 = tgpig.y*NR0;
+    const int r1 = tgpig.x*NR1;
+
     // if this block is of 64x32 shape or smaller
-    const short n_rows = (args.ne0 - r0*BLOCK_SIZE_M < BLOCK_SIZE_M) ? (args.ne0 - r0*BLOCK_SIZE_M) : BLOCK_SIZE_M;
-    const short n_cols = (args.ne1 - r1*BLOCK_SIZE_N < BLOCK_SIZE_N) ? (args.ne1 - r1*BLOCK_SIZE_N) : BLOCK_SIZE_N;
+    const short nr0 = (args.ne0 - r0 < NR0) ? (args.ne0 - r0) : NR0;
+    const short nr1 = (args.ne1 - r1 < NR1) ? (args.ne1 - r1) : NR1;
 
     // a thread shouldn't load data outside of the matrix
-    const short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1;
-    const short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1;
+    const short lr0 = ((short)tiitg/NL0) < nr0 ? ((short)tiitg/NL0) : nr0 - 1; // 0 .. 63
+    const short lr1 = ((short)tiitg/NL1) < nr1 ? ((short)tiitg/NL1) : nr1 - 1; // 0 .. 31
 
     S0_8x8 ma[4];
     S1_8x8 mb[2];
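The new constants take over from the BLOCK_SIZE_*/THREAD_PER_* macros: a threadgroup covers a 64x32 (NR0 x NR1) tile of the output, the shared dimension is consumed in NK = 32 slices, NL0 = 2 / NL1 = 4 threads cooperate on one row of src0 / src1 per slice, and r0/r1 are now pre-multiplied by the tile size. A minimal stand-alone sketch of the resulting tiling, assuming the kernel is dispatched on a grid of ceil(ne0/NR0) x ceil(ne1/NR1) threadgroups (the concrete ne0/ne1 values below are made up for illustration, not taken from the commit):

#include <algorithm>
#include <cstdio>

int main() {
    constexpr int NR0 = 64; // output rows covered by one threadgroup
    constexpr int NR1 = 32; // output columns covered by one threadgroup

    const int ne0 = 127;    // example output rows    (arbitrary)
    const int ne1 = 77;     // example output columns (arbitrary)

    for (int y = 0; y < (ne0 + NR0 - 1)/NR0; ++y) {
        for (int x = 0; x < (ne1 + NR1 - 1)/NR1; ++x) {
            const int r0  = y*NR0;                   // same as r0 = tgpig.y*NR0
            const int r1  = x*NR1;                   // same as r1 = tgpig.x*NR1
            const int nr0 = std::min(ne0 - r0, NR0); // clamp, as nr0 in the kernel
            const int nr1 = std::min(ne1 - r1, NR1); // clamp, as nr1 in the kernel
            std::printf("tg (%d,%d): rows [%d,%d) cols [%d,%d)\n", x, y, r0, r0 + nr0, r1, r1 + nr1);
        }
    }
    return 0;
}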
@@ -8166,35 +8173,44 @@ kernel void kernel_mul_mm(
         mc[i] = make_filled_simdgroup_matrix<float, 8>(0.f);
     }
 
-    short il = (tiitg % THREAD_PER_ROW);
+    const short il0 = (tiitg % NL0);
 
+    short il = il0;
+
     const int i12 = im%args.ne12;
     const int i13 = im/args.ne12;
 
     const uint64_t offset0 = (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
-    const short    offset1 = il/nl;
+    const short    offset1 = il0/nl;
 
-    device const block_q * x = (device const block_q *)(src0
-        + args.nb01*(r0*BLOCK_SIZE_M + thread_row) + offset0) + offset1;
+    device const block_q * x = (device const block_q *)(src0 + args.nb01*(r0 + lr0) + offset0) + offset1;
 
-    const short iy = (BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL));
+    const short iy = 8*(tiitg % NL1);
 
     device const T1 * y = (device const T1 *)(src1
         + args.nb13*i13
        + args.nb12*i12
-        + args.nb11*(r1*BLOCK_SIZE_N + thread_col)
+        + args.nb11*(r1 + lr1)
         + args.nb10*iy);
 
-    for (int loop_k = 0; loop_k < args.ne00; loop_k += BLOCK_SIZE_K) {
+    for (int loop_k = 0; loop_k < args.ne00; loop_k += NK) {
         // load data and store to threadgroup memory
         if (is_same<T0_4x4, block_q>::value && FC_mul_mm_bc_inp) {
             threadgroup_barrier(mem_flags::mem_threadgroup);
 
             // no need for dequantization
             for (short i = 0; i < 16; i++) {
-                *(sa + SG_MAT_SIZE * ((tiitg/THREAD_PER_ROW/8) \
-                +                     (tiitg%THREAD_PER_ROW)*16 + (i/8)*8) \
-                +                     (tiitg/THREAD_PER_ROW)%8  + (i&7)*8) = loop_k + 16*il + i < args.ne00 ? ((device T0 *) x)[i] : 0;
+                const short sx = 2*il0 + i/8;
+                const short sy = (tiitg/NL0)/8;
+
+              //const short lx = i%8;
+              //const short ly = (tiitg/NL0)%8;
+                const short lx = (tiitg/NL0)%8;
+                const short ly = i%8;
+
+                const short ib = 8*sx + sy;
+
+                *(sa + 64*ib + 8*ly + lx) = loop_k + 16*il + i < args.ne00 ? *((device T0 *) x + i) : 0;
             }
         } else {
             S0_4x4 temp_a;
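In the new layout the src0 slice held in threadgroup memory is stored as 32 sub-tiles of 8x8 elements, 64 contiguous values per sub-tile, addressed as sa + 64*ib + 8*ly + lx with ib = 8*sx + sy; sx walks the K direction and sy the 64 output rows. A stand-alone check, for illustration only and not part of the commit, that this mapping writes each of the 64*32 = 2048 slots exactly once (assuming 128 threads per threadgroup, as implied by the 0 .. 63 range of lr0):

#include <array>
#include <cstdio>

int main() {
    constexpr int NL0 = 2;             // NK/16: threads cooperating on one src0 row per K-slice
    std::array<int, 64*32> hits{};     // one counter per element of the sa tile

    for (int tiitg = 0; tiitg < 128; ++tiitg) {   // 128 threads per threadgroup (assumed)
        const int il0 = tiitg % NL0;
        for (int i = 0; i < 16; ++i) {            // each thread stores 16 values per K-slice
            const int sx = 2*il0 + i/8;           // 8x8 sub-tile index along K
            const int sy = (tiitg/NL0)/8;         // 8x8 sub-tile index along the 64 rows
            const int lx = (tiitg/NL0)%8;         // position inside the sub-tile
            const int ly = i%8;
            const int ib = 8*sx + sy;             // sub-tile number, 64 elements each
            hits[64*ib + 8*ly + lx]++;
        }
    }

    for (int n : hits) {
        if (n != 1) { std::printf("layout error\n"); return 1; }
    }
    std::printf("every sa slot is written exactly once\n");
    return 0;
}

The dequantized path below uses the same addressing, and the src1 tile in sb follows the same pattern with ib = 4*sx + sy.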
@@ -8203,91 +8219,122 @@ kernel void kernel_mul_mm(
             threadgroup_barrier(mem_flags::mem_threadgroup);
 
             FOR_UNROLL (short i = 0; i < 16; i++) {
-                *(sa + SG_MAT_SIZE * ((tiitg/THREAD_PER_ROW/8) \
-                +                     (tiitg%THREAD_PER_ROW)*16 + (i/8)*8) \
-                +                     (tiitg/THREAD_PER_ROW)%8  + (i&7)*8) = temp_a[i/4][i%4];
+                const short sx = 2*il0 + i/8;
+                const short sy = (tiitg/NL0)/8;
+
+              //const short lx = i%8;
+              //const short ly = (tiitg/NL0)%8;
+                const short lx = (tiitg/NL0)%8;
+                const short ly = i%8;
+
+                const short ib = 8*sx + sy;
+
+                // NOTE: this is massively slower.. WTF?
+                //sa[64*ib + 8*ly + lx] = temp_a[i/4][i%4];
+
+                *(sa + 64*ib + 8*ly + lx) = temp_a[i/4][i%4];
             }
         }
 
         if (FC_mul_mm_bc_inp) {
             for (short i = 0; i < 8; ++i) {
-                sb[32*8*(tiitg%THREAD_PER_COL) + 8*(tiitg/THREAD_PER_COL) + i] = loop_k + iy + i < args.ne00 ? (S1) ((device T1 *) y)[i] : 0;
+                const short sx = (tiitg%NL1);
+                const short sy = (tiitg/NL1)/8;
+
+                const short lx = i;
+                const short ly = (tiitg/NL1)%8;
+              //const short lx = (tiitg/NL1)%8;
+              //const short ly = i;
+
+                const short ib = 4*sx + sy;
+
+                *(sb + 64*ib + 8*ly + lx) = loop_k + iy + i < args.ne00 ? (S1) *((device T1 *) y + i) : 0;
             }
         } else {
-            *(threadgroup S1_2x4 *)(sb + 32*8*(tiitg%THREAD_PER_COL) + 8*(tiitg/THREAD_PER_COL)) = (S1_2x4)(*((device T1_2x4 *) y));
+            const short sx = (tiitg%NL1);
+            const short sy = (tiitg/NL1)/8;
+
+            const short dx = sx;
+            const short dy = sy;
+
+            const short ly = (tiitg/NL1)%8;
+
+            const short ib = 4*sx + sy;
+
+            *(threadgroup S1_2x4 *)(sb + 64*ib + 8*ly) = (S1_2x4)(*((device T1_2x4 *) y));
         }
 
         il = (il + 2 < nl) ? il + 2 : il % 2;
         x  = (il < 2) ? x + (2 + nl - 1)/nl : x;
-        y += BLOCK_SIZE_K;
+        y += NK;
+
+        // load matrices from threadgroup memory and conduct outer products
+        threadgroup const S0 * lsma = (sa + 4*64*(sgitg%2));
+        threadgroup const S1 * lsmb = (sb + 2*64*(sgitg/2));
 
         threadgroup_barrier(mem_flags::mem_threadgroup);
 
-        // load matrices from threadgroup memory and conduct outer products
-        threadgroup const S0 * lsma = (sa + THREAD_MAT_M*SG_MAT_SIZE*(sgitg%2));
-        threadgroup const S1 * lsmb = (sb + THREAD_MAT_N*SG_MAT_SIZE*(sgitg/2));
-
-        #pragma unroll(4)
-        for (short ik = 0; ik < BLOCK_SIZE_K/8; ik++) {
+        FOR_UNROLL (short ik = 0; ik < NK/8; ik++) {
             simdgroup_barrier(mem_flags::mem_none);
 
-            #pragma unroll(4)
-            for (short i = 0; i < 4; i++) {
-                simdgroup_load(ma[i], lsma + SG_MAT_SIZE * i);
-            }
-
-            #pragma unroll(2)
-            for (short i = 0; i < 2; i++) {
-                simdgroup_load(mb[i], lsmb + SG_MAT_SIZE * i);
+            FOR_UNROLL (short i = 0; i < 4; i++) {
+                simdgroup_load(ma[i], lsma + 64*i, 8, 0, false);
             }
 
             simdgroup_barrier(mem_flags::mem_none);
 
-            #pragma unroll(8)
-            for (short i = 0; i < 8; i++){
+            FOR_UNROLL (short i = 0; i < 2; i++) {
+                simdgroup_load(mb[i], lsmb + 64*i, 8, 0, false);
+            }
+
+            simdgroup_barrier(mem_flags::mem_none);
+
+            FOR_UNROLL (short i = 0; i < 8; i++){
                 simdgroup_multiply_accumulate(mc[i], mb[i/4], ma[i%4], mc[i]);
             }
 
-            lsma += (BLOCK_SIZE_M/SG_MAT_ROW)*SG_MAT_SIZE;
-            lsmb += (BLOCK_SIZE_N/SG_MAT_ROW)*SG_MAT_SIZE;
+            lsma += 8*64;
+            lsmb += 4*64;
         }
     }
 
-    if (!FC_mul_mm_bc_out || ((r0 + 1) * BLOCK_SIZE_M <= args.ne0 && (r1 + 1) * BLOCK_SIZE_N <= args.ne1)) {
+    if (!FC_mul_mm_bc_out || (r0 + NR0 <= args.ne0 && r1 + NR1 <= args.ne1)) {
         // if no bounds checks on the output are needed, we can directly write to device memory
         device float * C = (device float *) dst +
-            (BLOCK_SIZE_M * r0 + 32*(sgitg &  1)) + \
-            (BLOCK_SIZE_N * r1 + 16*(sgitg >> 1)) * args.ne0 + im*args.ne1*args.ne0;
+            (r0 + 32*(sgitg &  1)) + \
+            (r1 + 16*(sgitg >> 1)) * args.ne0 + im*args.ne1*args.ne0;
 
         for (short i = 0; i < 8; i++) {
-            simdgroup_store(mc[i], C + 8 * (i%4) + 8 * args.ne0 * (i/4), args.ne0);
+            simdgroup_store(mc[i], C + 8 * (i%4) + 8 * args.ne0 * (i/4), args.ne0, 0, false);
         }
     } else {
         // block is smaller than 64x32, we should avoid writing data outside of the matrix
         threadgroup_barrier(mem_flags::mem_threadgroup);
-        threadgroup float * temp_str = ((threadgroup float *) shmem) \
-                                     + 32*(sgitg&1) + (16*(sgitg >> 1))*BLOCK_SIZE_M;
+
+        threadgroup float * temp_str = ((threadgroup float *) shmem) + 32*(sgitg&1) + (16*(sgitg >> 1))*NR0;
 
         for (short i = 0; i < 8; i++) {
-            simdgroup_store(mc[i], temp_str + 8*(i%4) + 8*BLOCK_SIZE_M*(i/4), BLOCK_SIZE_M);
+            simdgroup_store(mc[i], temp_str + 8*(i%4) + 8*NR0*(i/4), NR0, 0, false);
         }
 
         threadgroup_barrier(mem_flags::mem_threadgroup);
 
         if (sgitg == 0) {
-            for (int j = tiitg; j < n_cols; j += BLOCK_SIZE_N) {
-                device float  * D  = (device float  *) dst + (r0*BLOCK_SIZE_M) + (r1*BLOCK_SIZE_N + j)*args.ne0 + im*args.ne1*args.ne0;
+            for (int j = tiitg; j < nr1; j += NR1) {
+                device float  * D  = (device float  *) dst + r0 + (r1 + j)*args.ne0 + im*args.ne1*args.ne0;
                 device float4 * D4 = (device float4 *) D;
 
-                threadgroup float  * C  = temp_str + (j*BLOCK_SIZE_M);
+                threadgroup float  * C  = temp_str + (j*NR0);
                 threadgroup float4 * C4 = (threadgroup float4 *) C;
 
                 int i = 0;
-                for (; i < n_rows/4; i++) {
+                for (; i < nr0/4; i++) {
                     *(D4 + i) = *(C4 + i);
                 }
 
                 i *= 4;
-                for (; i < n_rows; i++) {
+                for (; i < nr0; i++) {
                     *(D + i) = *(C + i);
                 }
             }
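The store side keeps the earlier 64x32 split, just re-expressed through NR0/NR1: each of the four simdgroups owns a 32x16 quadrant of the output tile (sgitg & 1 picks the row half, sgitg >> 1 the column half), and the eight mc[i] accumulators are 8x8 tiles arranged 4 down by 2 across inside that quadrant. A small illustration of that mapping, not taken from the commit and assuming 4 simdgroups (128 threads) per threadgroup:

#include <cstdio>

int main() {
    bool covered[64][32] = {};

    for (int sgitg = 0; sgitg < 4; ++sgitg) {  // 4 simdgroups of 32 threads (assumed)
        const int row0 = 32*(sgitg &  1);      // row half of the 64x32 tile
        const int col0 = 16*(sgitg >> 1);      // column half of the 64x32 tile
        for (int i = 0; i < 8; ++i) {          // the 8 accumulators mc[0..7]
            const int r = row0 + 8*(i%4);      // matches C + 8*(i%4) ...
            const int c = col0 + 8*(i/4);      // ... + 8*args.ne0*(i/4)
            std::printf("sgitg %d, mc[%d]: rows [%d,%d) cols [%d,%d)\n", sgitg, i, r, r + 8, c, c + 8);
            for (int dr = 0; dr < 8; ++dr) {
                for (int dc = 0; dc < 8; ++dc) {
                    covered[r + dr][c + dc] = true;
                }
            }
        }
    }

    for (int r = 0; r < 64; ++r) {
        for (int c = 0; c < 32; ++c) {
            if (!covered[r][c]) { std::printf("hole at %d,%d\n", r, c); return 1; }
        }
    }
    std::printf("64x32 tile fully covered, one 32x16 quadrant per simdgroup\n");
    return 0;
}

When the output tile would spill past ne0/ne1, the kernel instead stages the quadrants in the shared-memory scratch buffer and copies only nr0 x nr1 values out, as in the bounds-checked branch above.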
@@ -1265,7 +1265,7 @@ struct test_case {
                 //    printf("%5d %9.6f %9.6f, diff = %9.6f\n", i, f1[i], f2[i], f1[i] - f2[i]);
                 //}
                 //printf("\n");
-                //exit(1);
+                exit(1);
                 ud->ok = false;
             }
             return true;
@@ -6589,7 +6589,7 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
     test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F32, GGML_TYPE_F32, 16, 32, 32, { 1,  1}, {1, 1}, {0, 1, 2, 3}, true, 3));
     test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F32, GGML_TYPE_F32, 64, 77, 77, {12,1}, {1,1}));
 
-#if 0
+#if 1
     // test the mat-mat path for Metal
     for (int k = 1; k < 512; ++k) {
         test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 127, k, {12,1}, {1,1}));
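The re-enabled sweep drives the kernel with every shared-dimension size from 1 to 511 and a 64x127 result (assuming test_mul_mat takes m, n, k in that order), so it covers exactly one full 64-row block, a trailing partial 31-column block, and every possible tail of the NK = 32 K loop. A hypothetical helper, not part of the test file, that prints the K-loop shape for a few sizes from that sweep:

#include <cstdio>

int main() {
    constexpr int NK = 32;                        // K-slice size of the kernel's main loop
    const int samples[] = {1, 31, 32, 33, 255, 511};

    for (int k : samples) {                       // a few sizes from the k = 1..511 sweep
        const int slices = (k + NK - 1)/NK;       // iterations of the loop_k loop
        const int tail   = (k % NK == 0) ? NK : k % NK; // elements the last slice actually has
        std::printf("k = %3d: %2d slice(s), last slice carries %2d of %d elements\n", k, slices, tail, NK);
    }
    return 0;
}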