metal : support tensors in mul_mm_id
@@ -8204,12 +8204,12 @@ kernel void kernel_mul_mm(
         mc[i] = make_filled_simdgroup_matrix<float, 8>(0.f);
     }
 #else
-    auto tA = tensor<threadgroup S0,    dextents<int32_t, 2>, tensor_inline>(sa, dextents<int32_t, 2>(NK,  NR0));
-    auto tB = tensor<threadgroup S1,    dextents<int32_t, 2>, tensor_inline>(sb, dextents<int32_t, 2>(NR1, NK ));
+    auto tA = tensor<threadgroup S0, dextents<int32_t, 2>, tensor_inline>(sa, dextents<int32_t, 2>(NK,  NR0));
+    auto tB = tensor<threadgroup S1, dextents<int32_t, 2>, tensor_inline>(sb, dextents<int32_t, 2>(NR1, NK ));

-    constexpr auto desc = mpp::tensor_ops::matmul2d_descriptor(NR1, NR0, NK, false, true, false, mpp::tensor_ops::matmul2d_descriptor::mode::multiply_accumulate);
-
-    mpp::tensor_ops::matmul2d<desc, execution_simdgroups<4>> mm;
+    mpp::tensor_ops::matmul2d<
+        mpp::tensor_ops::matmul2d_descriptor(NR1, NR0, NK, false, true, false, mpp::tensor_ops::matmul2d_descriptor::mode::multiply_accumulate),
+        execution_simdgroups<4>> mm;

     auto cT = mm.get_destination_cooperative_tensor<decltype(tA), decltype(tB), float>();
 #endif
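Note: the hunk above only inlines the matmul2d descriptor into the template argument list; the math is unchanged. For readers new to the Metal 4 tensor-ops path, the descriptor's (NR1, NR0, NK) arguments together with multiply_accumulate mode mean that each mm.run call folds one NK-wide K-slice into the accumulator. Below is a minimal host-side C++ sketch of that accumulation step, under the assumption (inferred from the call sites, not from Apple's documentation) that cT is NR1 x NR0, tB is NR1 x NK and tA is NK x NR0:

    #include <cstdio>

    constexpr int NR0 = 4, NR1 = 3, NK = 2; // toy sizes standing in for 64/32/32

    // One multiply_accumulate step: C (NR1 x NR0) += B (NR1 x NK) * A (NK x NR0),
    // mirroring mm.run(sB, sA, cT) with tA shaped (NK, NR0) and tB shaped (NR1, NK).
    void matmul2d_step(const float A[NK][NR0], const float B[NR1][NK], float C[NR1][NR0]) {
        for (int m = 0; m < NR1; ++m)
            for (int n = 0; n < NR0; ++n)
                for (int k = 0; k < NK; ++k)
                    C[m][n] += B[m][k]*A[k][n];
    }

    int main() {
        float A[NK][NR0] = {{1,2,3,4},{5,6,7,8}}, B[NR1][NK] = {{1,0},{0,1},{1,1}}, C[NR1][NR0] = {};
        matmul2d_step(A, B, C);
        printf("%g %g\n", C[0][0], C[2][3]); // 1 12
    }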
@@ -8522,31 +8522,63 @@ kernel void kernel_mul_mm_id(
         ushort tiitg[[thread_index_in_threadgroup]],
         ushort tiisg[[thread_index_in_simdgroup]],
         ushort sgitg[[simdgroup_index_in_threadgroup]]) {

     threadgroup S0 * sa = (threadgroup S0 *)(shmem);
     threadgroup S1 * sb = (threadgroup S1 *)(shmem + 4096);

-    const int r0 = tgpig.y;
-    const int r1 = tgpig.x;
+    threadgroup float * sc = (threadgroup float *)(shmem);
+
+    constexpr int NR0 = 64;
+    constexpr int NR1 = 32;
+
+    constexpr int NK  = 32;
+    constexpr int NL0 = NK/16;
+    constexpr int NL1 = NK/8;
+
     const int im = tgpig.z; // expert
+    const int r0 = tgpig.y*NR0;
+    const int r1 = tgpig.x*NR1;

     device const uint32_t * tpe_u32 = (device const uint32_t *) (htpe);
     device const int32_t  * ids_i32 = (device const int32_t  *) (hids);

     const int32_t neh1 = tpe_u32[im];

-    if (r1*BLOCK_SIZE_N >= neh1) {
+    if (r1 >= neh1) {
         return;
     }

     // if this block is of 64x32 shape or smaller
-    const short n_rows = (args.ne0 - r0*BLOCK_SIZE_M < BLOCK_SIZE_M) ? (args.ne0 - r0*BLOCK_SIZE_M) : BLOCK_SIZE_M;
-    const short n_cols = (    neh1 - r1*BLOCK_SIZE_N < BLOCK_SIZE_N) ? (    neh1 - r1*BLOCK_SIZE_N) : BLOCK_SIZE_N;
+    const short nr0 = (args.ne0 - r0 < NR0) ? (args.ne0 - r0) : NR0;
+    const short nr1 = (    neh1 - r1 < NR1) ? (    neh1 - r1) : NR1;

     // a thread shouldn't load data outside of the matrix
-    const short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1;
-    const short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1;
+    const short lr0 = ((short)tiitg/NL0) < nr0 ? ((short)tiitg/NL0) : nr0 - 1; // 0 .. 63
+    const short lr1 = ((short)tiitg/NL1) < nr1 ? ((short)tiitg/NL1) : nr1 - 1; // 0 .. 31
+
+    const short il0 = (tiitg % NL0);
+
+    short il = il0;
+
+    const int id = ids_i32[im*args.ne21 + r1 + lr1];
+
+    const short i11 = (id % args.ne20) % args.ne11;
+    const short i12 = (id / args.ne20);
+    const short i13 = 0;
+
+    const uint64_t offset0 = im*args.nb02 + i13*args.nb03;
+    const short    offset1 = il0/nl;
+
+    device const block_q * x = (device const block_q *)(src0 + args.nb01*(r0 + lr0) + offset0) + offset1;
+
+    const short iy = 8*(tiitg % NL1);
+
+    device const T1 * y = (device const T1 *)(src1
+        + args.nb13*i13
+        + args.nb12*i12
+        + args.nb11*i11
+        + args.nb10*iy);

 #ifndef GGML_METAL_HAS_TENSOR
     S0_8x8 ma[4];
     S1_8x8 mb[2];

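The hunk above moves all of the expert-routing index math to the top of the kernel: lr1 picks the output row this thread helps load, and the packed entry read from ids_i32 is decoded into a source row (i11) and a token/batch index (i12). A small runnable C++ sketch of just that decode; treating ne20 as the number of experts used per token and ne21 as the ids row stride is an assumption based on how the kernel indexes ids_i32, and the concrete values are made up for illustration:

    #include <cstdio>

    int main() {
        const int ne20 = 4;  // assumed: experts used per token
        const int ne11 = 8;  // assumed: rows available in src1
        const int id   = 13; // hypothetical packed entry from ids_i32

        const int i11 = (id % ne20) % ne11; // expert-slot index, wrapped into src1 rows
        const int i12 =  id / ne20;         // token index
        printf("id=%d -> i11=%d, i12=%d\n", id, i11, i12); // id=13 -> i11=1, i12=3
    }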
@@ -8555,39 +8587,36 @@ kernel void kernel_mul_mm_id(
     for (short i = 0; i < 8; i++){
         mc[i] = make_filled_simdgroup_matrix<float, 8>(0.f);
     }
 #else
-    short il = (tiitg % THREAD_PER_ROW);
-
-    const int id = ids_i32[im*args.ne21 + r1*BLOCK_SIZE_N + thread_col];
+    auto tA = tensor<threadgroup S0, dextents<int32_t, 2>, tensor_inline>(sa, dextents<int32_t, 2>(NK,  NR0));
+    auto tB = tensor<threadgroup S1, dextents<int32_t, 2>, tensor_inline>(sb, dextents<int32_t, 2>(NR1, NK ));
+
+    mpp::tensor_ops::matmul2d<
+        mpp::tensor_ops::matmul2d_descriptor(NR1, NR0, NK, false, true, false, mpp::tensor_ops::matmul2d_descriptor::mode::multiply_accumulate),
+        execution_simdgroups<4>> mm;
+
+    auto cT = mm.get_destination_cooperative_tensor<decltype(tA), decltype(tB), float>();
 #endif

-    const short i11 = (id % args.ne20) % args.ne11;
-    const short i12 = (id / args.ne20);
-    const short i13 = 0;
-
-    const uint64_t offset0 = im*args.nb02 + i13*args.nb03;
-    const short    offset1 = il/nl;
-
-    device const block_q * x = (device const block_q *)(src0
-        + args.nb01*(r0*BLOCK_SIZE_M + thread_row) + offset0) + offset1;
-
-    const short iy = (BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL));
-
-    device const T1 * y = (device const T1 *)(src1
-        + args.nb13*i13
-        + args.nb12*i12
-        + args.nb11*i11
-        + args.nb10*iy);
-
-    for (int loop_k = 0; loop_k < args.ne00; loop_k += BLOCK_SIZE_K) {
+    for (int loop_k = 0; loop_k < args.ne00; loop_k += NK) {
 #ifndef GGML_METAL_HAS_TENSOR
         // load data and store to threadgroup memory
         if (is_same<T0_4x4, block_q>::value && FC_mul_mm_bc_inp) {
             threadgroup_barrier(mem_flags::mem_threadgroup);

             // no need for dequantization
             for (short i = 0; i < 16; i++) {
-                *(sa + SG_MAT_SIZE * ((tiitg/THREAD_PER_ROW/8) \
-                +                     (tiitg%THREAD_PER_ROW)*16 + (i/8)*8) \
-                +                     (tiitg/THREAD_PER_ROW)%8  + (i&7)*8) = loop_k + 16*il + i < args.ne00 ? ((device T0 *) x)[i] : 0;
+                const short sx = 2*il0 + i/8;
+                const short sy = (tiitg/NL0)/8;
+
+              //const short lx = i%8;
+              //const short ly = (tiitg/NL0)%8;
+                const short lx = (tiitg/NL0)%8;
+                const short ly = i%8;
+
+                const short ib = 8*sx + sy;
+
+                *(sa + 64*ib + 8*ly + lx) = loop_k + 16*il + i < args.ne00 ? *((device T0 *) x + i) : 0;
             }
         } else {
             S0_4x4 temp_a;
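In the non-tensor branch above, sa is addressed as an array of 8x8 simdgroup-matrix tiles: ib = 8*sx + sy selects a 64-element tile and (lx, ly) an element inside it, so the store offset is 64*ib + 8*ly + lx. A short C++ sketch of that address computation (the index values are illustrative only):

    #include <cstdio>

    // Tile-blocked address used for 'sa': 64 contiguous values per 8x8 tile.
    int sa_offset(int sx, int sy, int lx, int ly) {
        const int ib = 8*sx + sy; // the A staging area holds 8 tiles per sx step
        return 64*ib + 8*ly + lx;
    }

    int main() {
        printf("%d\n", sa_offset(0, 0, 1, 0)); // next element in a tile row: 1
        printf("%d\n", sa_offset(0, 1, 0, 0)); // first element of the next tile: 64
    }

Keeping each 8x8 tile contiguous is what lets the later simdgroup_load calls fetch a whole tile with a stride of 8.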
@@ -8596,85 +8625,188 @@ kernel void kernel_mul_mm_id(
             threadgroup_barrier(mem_flags::mem_threadgroup);

             FOR_UNROLL (short i = 0; i < 16; i++) {
-                *(sa + SG_MAT_SIZE * ((tiitg/THREAD_PER_ROW/8) \
-                +                     (tiitg%THREAD_PER_ROW)*16 + (i/8)*8) \
-                +                     (tiitg/THREAD_PER_ROW)%8  + (i&7)*8) = temp_a[i/4][i%4];
+                const short sx = 2*il0 + i/8;
+                const short sy = (tiitg/NL0)/8;
+
+              //const short lx = i%8;
+              //const short ly = (tiitg/NL0)%8;
+                const short lx = (tiitg/NL0)%8;
+                const short ly = i%8;
+
+                const short ib = 8*sx + sy;
+
+                // NOTE: this is massively slower.. WTF?
+                //sa[64*ib + 8*ly + lx] = temp_a[i/4][i%4];
+
+                *(sa + 64*ib + 8*ly + lx) = temp_a[i/4][i%4];
             }
         }

         if (FC_mul_mm_bc_inp) {
             for (short i = 0; i < 8; ++i) {
-                sb[32*8*(tiitg%THREAD_PER_COL) + 8*(tiitg/THREAD_PER_COL) + i] = loop_k + iy + i < args.ne00 ? (S1) ((device T1 *) y)[i] : 0;
+                const short sx = (tiitg%NL1);
+                const short sy = (tiitg/NL1)/8;
+
+                const short lx = i;
+                const short ly = (tiitg/NL1)%8;
+              //const short lx = (tiitg/NL1)%8;
+              //const short ly = i;
+
+                const short ib = 4*sx + sy;
+
+                *(sb + 64*ib + 8*ly + lx) = loop_k + iy + i < args.ne00 ? (S1) *((device T1 *) y + i) : 0;
             }
         } else {
-            *(threadgroup S1_2x4 *)(sb + 32*8*(tiitg%THREAD_PER_COL) + 8*(tiitg/THREAD_PER_COL)) = (S1_2x4)(*((device T1_2x4 *) y));
+            const short sx = (tiitg%NL1);
+            const short sy = (tiitg/NL1)/8;
+
+            const short dx = sx;
+            const short dy = sy;
+
+            const short ly = (tiitg/NL1)%8;
+
+            const short ib = 4*sx + sy;
+
+            *(threadgroup S1_2x4 *)(sb + 64*ib + 8*ly) = (S1_2x4)(*((device T1_2x4 *) y));
         }
+#else
+        // load data and store to threadgroup memory
+        if (is_same<T0_4x4, block_q>::value && FC_mul_mm_bc_inp) {
+            threadgroup_barrier(mem_flags::mem_threadgroup);
+
+            // no need for dequantization
+            for (short i = 0; i < 16; i++) {
+                const short sx = 2*il0 + i/8;
+                const short sy = (tiitg/NL0)/8;
+
+                const short lx = i%8;
+                const short ly = (tiitg/NL0)%8;
+                //const short lx = (tiitg/NL0)%8;
+                //const short ly = i%8;
+
+                *(sa + NK*(8*sy + ly) + 8*sx + lx) = loop_k + 16*il + i < args.ne00 ? *((device T0 *) x + i) : 0;
+            }
+        } else {
+            S0_4x4 temp_a;
+            dequantize_func(x, il, temp_a);
+
+            threadgroup_barrier(mem_flags::mem_threadgroup);
+
+            FOR_UNROLL (short i = 0; i < 16; i++) {
+                const short sx = 2*il0 + i/8;
+                const short sy = (tiitg/NL0)/8;
+
+                const short lx = i%8;
+                const short ly = (tiitg/NL0)%8;
+                //const short lx = (tiitg/NL0)%8;
+                //const short ly = i%8;
+
+                *(sa + NK*(8*sy + ly) + 8*sx + lx) = temp_a[i/4][i%4];
+            }
+        }
+
+        if (FC_mul_mm_bc_inp) {
+            for (short i = 0; i < 8; ++i) {
+                const short sx = (tiitg%NL1);
+                const short sy = (tiitg/NL1)/8;
+
+                const short lx = i;
+                const short ly = (tiitg/NL1)%8;
+                //const short lx = (tiitg/NL1)%8;
+                //const short ly = i;
+
+                *(sb + NK*(8*sy + ly) + 8*sx + lx) = loop_k + iy + i < args.ne00 ? (S1) *((device T1 *) y + i) : 0;
+            }
+        } else {
+            const short sx = (tiitg%NL1);
+            const short sy = (tiitg/NL1)/8;
+
+            //const short lx = i;
+            const short ly = (tiitg/NL1)%8;
+            //const short lx = (tiitg/NL1)%8;
+            //const short ly = i;
+
+            *(threadgroup S1_2x4 *)(sb + NK*(8*sy + ly) + 8*sx) = (S1_2x4)(*((device T1_2x4 *) y));
+        }
 #endif

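The two load paths above stage the very same A and B tiles in two different threadgroup layouts: the simdgroup path keeps each 8x8 tile contiguous (offset 64*ib + 8*ly + lx), while the tensor path writes a plain row-major image with stride NK (offset NK*(8*sy + ly) + 8*sx + lx), which is what the tA/tB tensor views declared earlier describe. Note that the roles of lx and ly are also swapped between the branches (see the commented-out alternatives), so the transposition is baked into the store. A side-by-side C++ sketch of the two address functions, with illustrative indices:

    #include <cstdio>

    constexpr int NK = 32;

    // Simdgroup path: tile-blocked, 64 contiguous values per 8x8 tile.
    int blocked(int sx, int sy, int lx, int ly)  { return 64*(8*sx + sy) + 8*ly + lx; }

    // Tensor path: row-major with row stride NK.
    int rowmajor(int sx, int sy, int lx, int ly) { return NK*(8*sy + ly) + 8*sx + lx; }

    int main() {
        // Same (sx, sy, lx, ly) quadruple, two different staging addresses:
        printf("blocked=%d rowmajor=%d\n", blocked(1, 0, 3, 4), rowmajor(1, 0, 3, 4));
    }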
         il = (il + 2 < nl) ? il + 2 : il % 2;
         x  = (il < 2) ? x + (2 + nl - 1)/nl : x;
-        y += BLOCK_SIZE_K;
+        y += NK;

         threadgroup_barrier(mem_flags::mem_threadgroup);

 #ifndef GGML_METAL_HAS_TENSOR
         // load matrices from threadgroup memory and conduct outer products
-        threadgroup const S0 * lsma = (sa + THREAD_MAT_M*SG_MAT_SIZE*(sgitg%2));
-        threadgroup const S1 * lsmb = (sb + THREAD_MAT_N*SG_MAT_SIZE*(sgitg/2));
+        threadgroup const S0 * lsma = (sa + 4*64*(sgitg%2));
+        threadgroup const S1 * lsmb = (sb + 2*64*(sgitg/2));

-        #pragma unroll(4)
-        for (short ik = 0; ik < BLOCK_SIZE_K/8; ik++) {
-            #pragma unroll(4)
-            for (short i = 0; i < 4; i++) {
-                simdgroup_load(ma[i], lsma + SG_MAT_SIZE * i);
+        FOR_UNROLL (short ik = 0; ik < NK/8; ik++) {
+            simdgroup_barrier(mem_flags::mem_none);
+
+            FOR_UNROLL (short i = 0; i < 4; i++) {
+                simdgroup_load(ma[i], lsma + 64*i, 8, 0, false);
             }

             simdgroup_barrier(mem_flags::mem_none);

-            #pragma unroll(2)
-            for (short i = 0; i < 2; i++) {
-                simdgroup_load(mb[i], lsmb + SG_MAT_SIZE * i);
+            FOR_UNROLL (short i = 0; i < 2; i++) {
+                simdgroup_load(mb[i], lsmb + 64*i, 8, 0, false);
             }

-            #pragma unroll(8)
-            for (short i = 0; i < 8; i++){
+            simdgroup_barrier(mem_flags::mem_none);
+
+            FOR_UNROLL (short i = 0; i < 8; i++){
                 simdgroup_multiply_accumulate(mc[i], mb[i/4], ma[i%4], mc[i]);
             }

-            lsma += (BLOCK_SIZE_M/SG_MAT_ROW)*SG_MAT_SIZE;
-            lsmb += (BLOCK_SIZE_N/SG_MAT_ROW)*SG_MAT_SIZE;
+            lsma += 8*64;
+            lsmb += 4*64;
         }
+#else
+        auto sA = tA.slice(0, 0);
+        auto sB = tB.slice(0, 0);
+
+        mm.run(sB, sA, cT);
+#endif
     }
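In the fallback branch above, each simdgroup accumulates a 2x4 grid of 8x8 output tiles: mb contributes 2 tiles (16 rows of the result), ma contributes 4 tiles (32 columns), and the 8 accumulators are paired as mc[i] += mb[i/4] * ma[i%4]. With the 4 simdgroups offset into lsma/lsmb via sgitg%2 and sgitg/2, that covers the full 32x64 output block. A trivial C++ sketch that just enumerates the pairing:

    #include <cstdio>

    int main() {
        // mc[i] accumulates the product of B-tile i/4 with A-tile i%4.
        for (int i = 0; i < 8; ++i) {
            printf("mc[%d] += mb[%d] * ma[%d]\n", i, i/4, i%4);
        }
    }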

-    // block is smaller than 64x32, we should avoid writing data outside of the matrix
     threadgroup_barrier(mem_flags::mem_threadgroup);

-    threadgroup float * temp_str = ((threadgroup float *) shmem) \
-                                 + 32*(sgitg&1) + (16*(sgitg >> 1))*BLOCK_SIZE_M;
+#ifdef GGML_METAL_HAS_TENSOR
+    auto tC = tensor<threadgroup float, dextents<int32_t, 2>, tensor_inline>(sc, dextents<int32_t, 2>(NR0, NR1));
+
+    cT.store(tC);
+#else
+    threadgroup float * temp_str = ((threadgroup float *) shmem) + 32*(sgitg&1) + (16*(sgitg >> 1))*NR0;

     #pragma unroll(8)
     for (short i = 0; i < 8; i++) {
-        simdgroup_store(mc[i], temp_str + 8*(i%4) + 8*BLOCK_SIZE_M*(i/4), BLOCK_SIZE_M);
+        simdgroup_store(mc[i], temp_str + 8*(i%4) + 8*NR0*(i/4), NR0, 0, false);
     }
+#endif

     threadgroup_barrier(mem_flags::mem_threadgroup);

-    for (short j = sgitg; j < n_cols; j += 4) {
-        const int id = ids_i32[im*args.ne21 + r1*BLOCK_SIZE_N + j];
+    for (short j = sgitg; j < nr1; j += 4) {
+        const int id = ids_i32[im*args.ne21 + r1 + j];

         const short ide = id % args.ne20;
         const short idt = id / args.ne20;

-        device float  * D  = (device float  *) dst + (r0*BLOCK_SIZE_M) + ide*args.ne0 + idt*args.ne1*args.ne0;
+        device float  * D  = (device float  *) dst + r0 + ide*args.ne0 + idt*args.ne1*args.ne0;
         device float4 * D4 = (device float4 *) D;

-        threadgroup float  * C  = (threadgroup float  *) shmem + (j*BLOCK_SIZE_M);
+        threadgroup float  * C  = (threadgroup float  *) shmem + j*NR0;
         threadgroup float4 * C4 = (threadgroup float4 *) C;

         int i = tiisg;
-        for (; i < n_rows/4; i += 32) {
+        for (; i < nr0/4; i += 32) {
             *(D4 + i) = *(C4 + i);
         }

-        i = (4*(n_rows/4)) + tiisg;
-        for (; i < n_rows; i += 32) {
+        i = (4*(nr0/4)) + tiisg;
+        for (; i < nr0; i += 32) {
             *(D + i) = *(C + i);
         }
     }
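The epilogue gathers each output row j through threadgroup memory and copies it to its routed destination with a vectorized float4 loop plus a scalar tail, the lanes striding by the simdgroup width of 32. A single-threaded C++ sketch of the same bulk-plus-tail pattern (the lane striding is dropped for clarity):

    #include <cstdio>

    // Copy nr0 floats: 4 at a time, then a scalar tail for the last 0..3.
    void copy_row(const float * C, float * D, int nr0) {
        for (int i = 0; i < nr0/4; ++i)
            for (int k = 0; k < 4; ++k)
                D[4*i + k] = C[4*i + k];
        for (int i = 4*(nr0/4); i < nr0; ++i)
            D[i] = C[i];
    }

    int main() {
        float C[7] = {1, 2, 3, 4, 5, 6, 7}, D[7] = {};
        copy_row(C, D, 7);
        for (float v : D) printf("%g ", v); // 1 2 3 4 5 6 7
        printf("\n");
    }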