mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-30 08:42:00 +00:00 
			
		
		
		
	fix MUSA compiler warning (#12704)
* fix MUSA compiler warning
* replace (void) with GGML_UNUSED
This commit is contained in:
		| @@ -4,13 +4,14 @@ template <size_t split_d_inner, size_t d_conv> | |||||||
| static __global__ void ssm_conv_f32(const float * __restrict__ src0, const float * __restrict__ src1, | static __global__ void ssm_conv_f32(const float * __restrict__ src0, const float * __restrict__ src1, | ||||||
|                                     const int src0_nb0, const int src0_nb1, const int src0_nb2, const int src1_nb1, |                                     const int src0_nb0, const int src0_nb1, const int src0_nb2, const int src1_nb1, | ||||||
|                                     float * __restrict__ dst, const int dst_nb0, const int dst_nb1, const int dst_nb2, |                                     float * __restrict__ dst, const int dst_nb0, const int dst_nb1, const int dst_nb2, | ||||||
|                                     const int nc, const int ncs, const int nr, const int n_t, const int n_s) { |                                     const int64_t n_t) { | ||||||
|  |     GGML_UNUSED(src0_nb0); | ||||||
|     const int tid  = threadIdx.x; |     const int tid  = threadIdx.x; | ||||||
|     const int bidx = blockIdx.x; |     const int bidx = blockIdx.x; | ||||||
|     const int bidy = blockIdx.y; |     const int bidy = blockIdx.y; | ||||||
|  |  | ||||||
|     const float * x_block = (const float *) ((char *) src0 + bidx * src0_nb2 + bidy * split_d_inner * src0_nb1); |     const float * x_block = (const float *) ((const char *) src0 + bidx * src0_nb2 + bidy * split_d_inner * src0_nb1); | ||||||
|     const float * w_block = (const float *) ((char *) src1 + bidy * split_d_inner * src1_nb1); |     const float * w_block = (const float *) ((const char *) src1 + bidy * split_d_inner * src1_nb1); | ||||||
|     float *       y_block = (float *) ((char *) dst + bidx * dst_nb2 + bidy * split_d_inner * dst_nb0); |     float *       y_block = (float *) ((char *) dst + bidx * dst_nb2 + bidy * split_d_inner * dst_nb0); | ||||||
|  |  | ||||||
|     const int stride_x = src0_nb1 / sizeof(float); |     const int stride_x = src0_nb1 / sizeof(float); | ||||||
| @@ -21,15 +22,15 @@ static __global__ void ssm_conv_f32(const float * __restrict__ src0, const float | |||||||
|     float w[d_conv] = { 0.0f }; |     float w[d_conv] = { 0.0f }; | ||||||
|  |  | ||||||
| #pragma unroll | #pragma unroll | ||||||
|     for (int j = 0; j < d_conv; j++) { |     for (size_t j = 0; j < d_conv; j++) { | ||||||
|         w[j] = w_block[tid * stride_w + j]; |         w[j] = w_block[tid * stride_w + j]; | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     for (int i = 0; i < n_t; i++) { |     for (int64_t i = 0; i < n_t; i++) { | ||||||
|         float sumf = 0.0f; |         float sumf = 0.0f; | ||||||
|  |  | ||||||
|         if (i == 0) { |         if (i == 0) { | ||||||
|             for (int j = 0; j < d_conv; j++) { |             for (size_t j = 0; j < d_conv; j++) { | ||||||
|                 x[j] = x_block[tid * stride_x + j]; |                 x[j] = x_block[tid * stride_x + j]; | ||||||
|             } |             } | ||||||
|         } else { |         } else { | ||||||
| @@ -37,27 +38,26 @@ static __global__ void ssm_conv_f32(const float * __restrict__ src0, const float | |||||||
|         } |         } | ||||||
|  |  | ||||||
| #pragma unroll | #pragma unroll | ||||||
|         for (int j = 0; j < d_conv; j++) { |         for (size_t j = 0; j < d_conv; j++) { | ||||||
|             sumf += x[(i + j) % d_conv] * w[j]; |             sumf += x[(i + j) % d_conv] * w[j]; | ||||||
|         } |         } | ||||||
|         y_block[i * stride_y + tid] = sumf; |         y_block[i * stride_y + tid] = sumf; | ||||||
|     } |     } | ||||||
| } | } | ||||||
|  |  | ||||||
| template <size_t split_d_inner, size_t d_conv, size_t split_n_t> | template <size_t split_d_inner, size_t d_conv, int64_t split_n_t> | ||||||
| static __global__ void ssm_conv_long_token_f32(const float * __restrict__ src0, const float * __restrict__ src1, | static __global__ void ssm_conv_long_token_f32(const float * __restrict__ src0, const float * __restrict__ src1, | ||||||
|                                                const int src0_nb0, const int src0_nb1, const int src0_nb2, |                                                const int src0_nb0, const int src0_nb1, const int src0_nb2, | ||||||
|                                                const int src1_nb1, float * __restrict__ dst, const int dst_nb0, |                                                const int src1_nb1, float * __restrict__ dst, const int dst_nb0, | ||||||
|                                                const int dst_nb1, const int dst_nb2, const int nc, const int ncs, |                                                const int dst_nb1, const int dst_nb2, const int64_t n_t) { | ||||||
|                                                const int nr, const int n_t, const int n_s) { |  | ||||||
|     const int tid  = threadIdx.x; |     const int tid  = threadIdx.x; | ||||||
|     const int bidx = blockIdx.x; |     const int bidx = blockIdx.x; | ||||||
|     const int bidy = blockIdx.y; |     const int bidy = blockIdx.y; | ||||||
|     const int bidz = blockIdx.z; |     const int bidz = blockIdx.z; | ||||||
|  |  | ||||||
|     const float * x_block = (const float *) ((char *) src0 + bidx * src0_nb2 + bidy * split_d_inner * src0_nb1 + |     const float * x_block = (const float *) ((const char *) src0 + bidx * src0_nb2 + bidy * split_d_inner * src0_nb1 + | ||||||
|                                              bidz * split_n_t * src0_nb0); |                                              bidz * split_n_t * src0_nb0); | ||||||
|     const float * w_block = (const float *) ((char *) src1 + bidy * split_d_inner * src1_nb1); |     const float * w_block = (const float *) ((const char *) src1 + bidy * split_d_inner * src1_nb1); | ||||||
|     float *       y_block = |     float *       y_block = | ||||||
|         (float *) ((char *) dst + bidx * dst_nb2 + bidz * split_n_t * dst_nb1 + bidy * split_d_inner * dst_nb0); |         (float *) ((char *) dst + bidx * dst_nb2 + bidz * split_n_t * dst_nb1 + bidy * split_d_inner * dst_nb0); | ||||||
|  |  | ||||||
| @@ -69,17 +69,17 @@ static __global__ void ssm_conv_long_token_f32(const float * __restrict__ src0, | |||||||
|     float w[d_conv] = { 0.0f }; |     float w[d_conv] = { 0.0f }; | ||||||
|  |  | ||||||
| #pragma unroll | #pragma unroll | ||||||
|     for (int j = 0; j < d_conv; j++) { |     for (size_t j = 0; j < d_conv; j++) { | ||||||
|         w[j] = w_block[tid * stride_w + j]; |         w[j] = w_block[tid * stride_w + j]; | ||||||
|     } |     } | ||||||
|  |  | ||||||
| #pragma unroll | #pragma unroll | ||||||
|     for (int i = 0; i < split_n_t; i++) { |     for (int64_t i = 0; i < split_n_t; i++) { | ||||||
|         if (bidz * split_n_t + i < n_t) { |         if (bidz * split_n_t + i < n_t) { | ||||||
|             float sumf = 0.0f; |             float sumf = 0.0f; | ||||||
|  |  | ||||||
|             if (i == 0) { |             if (i == 0) { | ||||||
|                 for (int j = 0; j < d_conv; j++) { |                 for (size_t j = 0; j < d_conv; j++) { | ||||||
|                     x[j] = x_block[tid * stride_x + j]; |                     x[j] = x_block[tid * stride_x + j]; | ||||||
|                 } |                 } | ||||||
|             } else { |             } else { | ||||||
| @@ -87,7 +87,7 @@ static __global__ void ssm_conv_long_token_f32(const float * __restrict__ src0, | |||||||
|             } |             } | ||||||
|  |  | ||||||
| #pragma unroll | #pragma unroll | ||||||
|             for (int j = 0; j < d_conv; j++) { |             for (size_t j = 0; j < d_conv; j++) { | ||||||
|                 sumf += x[(i + j) % d_conv] * w[j]; |                 sumf += x[(i + j) % d_conv] * w[j]; | ||||||
|             } |             } | ||||||
|             y_block[i * stride_y + tid] = sumf; |             y_block[i * stride_y + tid] = sumf; | ||||||
| @@ -97,8 +97,8 @@ static __global__ void ssm_conv_long_token_f32(const float * __restrict__ src0, | |||||||
|  |  | ||||||
| static void ssm_conv_f32_cuda(const float * src0, const float * src1, const int src0_nb0, const int src0_nb1, | static void ssm_conv_f32_cuda(const float * src0, const float * src1, const int src0_nb0, const int src0_nb1, | ||||||
|                               const int src0_nb2, const int src1_nb1, float * dst, const int dst_nb0, const int dst_nb1, |                               const int src0_nb2, const int src1_nb1, float * dst, const int dst_nb0, const int dst_nb1, | ||||||
|                               const int dst_nb2, const int nc, const int ncs, const int nr, const int n_t, |                               const int dst_nb2, const int64_t nc, const int64_t nr, const int64_t n_t, | ||||||
|                               const int n_s, cudaStream_t stream) { |                               const int64_t n_s, cudaStream_t stream) { | ||||||
|     const int threads = 128; |     const int threads = 128; | ||||||
|     GGML_ASSERT(nr % threads == 0); |     GGML_ASSERT(nr % threads == 0); | ||||||
|  |  | ||||||
| @@ -106,18 +106,16 @@ static void ssm_conv_f32_cuda(const float * src0, const float * src1, const int | |||||||
|         const dim3 blocks(n_s, (nr + threads - 1) / threads, 1); |         const dim3 blocks(n_s, (nr + threads - 1) / threads, 1); | ||||||
|         if (nc == 4) { |         if (nc == 4) { | ||||||
|             ssm_conv_f32<threads, 4><<<blocks, threads, 0, stream>>>(src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1, |             ssm_conv_f32<threads, 4><<<blocks, threads, 0, stream>>>(src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1, | ||||||
|                                                                      dst, dst_nb0, dst_nb1, dst_nb2, nc, ncs, nr, n_t, |                                                                      dst, dst_nb0, dst_nb1, dst_nb2, n_t); | ||||||
|                                                                      n_s); |  | ||||||
|         } else { |         } else { | ||||||
|             GGML_ABORT("Only support kernel size = 4  now."); |             GGML_ABORT("Only support kernel size = 4  now."); | ||||||
|         } |         } | ||||||
|     } else { |     } else { | ||||||
|         if (nc == 4) { |         if (nc == 4) { | ||||||
|             const int split_n_t = 32; |             const int64_t split_n_t = 32; | ||||||
|             dim3          blocks(n_s, (nr + threads - 1) / threads, (n_t + split_n_t - 1) / split_n_t); |             dim3          blocks(n_s, (nr + threads - 1) / threads, (n_t + split_n_t - 1) / split_n_t); | ||||||
|             ssm_conv_long_token_f32<threads, 4, split_n_t> |             ssm_conv_long_token_f32<threads, 4, split_n_t><<<blocks, threads, 0, stream>>>( | ||||||
|                 <<<blocks, threads, 0, stream>>>(src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1, dst, dst_nb0, |                 src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1, dst, dst_nb0, dst_nb1, dst_nb2, n_t); | ||||||
|                                                  dst_nb1, dst_nb2, nc, ncs, nr, n_t, n_s); |  | ||||||
|         } else { |         } else { | ||||||
|             GGML_ABORT("Only support kernel size = 4 right now."); |             GGML_ABORT("Only support kernel size = 4 right now."); | ||||||
|         } |         } | ||||||
| @@ -128,11 +126,10 @@ void ggml_cuda_op_ssm_conv(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { | |||||||
|     const struct ggml_tensor * src0 = dst->src[0];  // conv_x |     const struct ggml_tensor * src0 = dst->src[0];  // conv_x | ||||||
|     const struct ggml_tensor * src1 = dst->src[1];  // conv1d.weight |     const struct ggml_tensor * src1 = dst->src[1];  // conv1d.weight | ||||||
|  |  | ||||||
|     const int nc  = src1->ne[0];                    // d_conv |     const int64_t nc  = src1->ne[0];                // d_conv | ||||||
|     const int ncs = src0->ne[0];                    // d_conv - 1 + n_t |     const int64_t nr  = src0->ne[1];                // d_inner | ||||||
|     const int nr  = src0->ne[1];                    // d_inner |     const int64_t n_t = dst->ne[1];                 // tokens per sequence | ||||||
|     const int n_t = dst->ne[1];                     // tokens per sequence |     const int64_t n_s = dst->ne[2];                 // number of sequences in the batch | ||||||
|     const int n_s = dst->ne[2];                     // number of sequences in the batch |  | ||||||
|  |  | ||||||
|     GGML_ASSERT(dst->ne[0] == nr); |     GGML_ASSERT(dst->ne[0] == nr); | ||||||
|     GGML_ASSERT(src0->nb[0] == sizeof(float)); |     GGML_ASSERT(src0->nb[0] == sizeof(float)); | ||||||
| @@ -147,5 +144,5 @@ void ggml_cuda_op_ssm_conv(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { | |||||||
|     GGML_ASSERT(src0->type == GGML_TYPE_F32); |     GGML_ASSERT(src0->type == GGML_TYPE_F32); | ||||||
|     GGML_ASSERT(dst->type == GGML_TYPE_F32); |     GGML_ASSERT(dst->type == GGML_TYPE_F32); | ||||||
|     ssm_conv_f32_cuda(src0_d, src1_d, src0->nb[0], src0->nb[1], src0->nb[2], src1->nb[1], dst_d, dst->nb[0], dst->nb[1], |     ssm_conv_f32_cuda(src0_d, src1_d, src0->nb[0], src0->nb[1], src0->nb[2], src1->nb[1], dst_d, dst->nb[0], dst->nb[1], | ||||||
|                       dst->nb[2], nc, ncs, nr, n_t, n_s, stream); |                       dst->nb[2], nc, nr, n_t, n_s, stream); | ||||||
| } | } | ||||||
|   | |||||||
| @@ -1,10 +1,5 @@ | |||||||
| #include "ssm-scan.cuh" | #include "ssm-scan.cuh" | ||||||
|  |  | ||||||
| // #include <cuda_runtime.h> |  | ||||||
| // static __device__ void global_to_shared(const float *src, float *dst) { |  | ||||||
| //   asm volatile("cp.async."); |  | ||||||
| // } |  | ||||||
|  |  | ||||||
| template <size_t splitD, size_t N> | template <size_t splitD, size_t N> | ||||||
| __global__ void __launch_bounds__(splitD, 2) | __global__ void __launch_bounds__(splitD, 2) | ||||||
|     ssm_scan_f32(const float * __restrict__ src0, const float * __restrict__ src1, const float * __restrict__ src2, |     ssm_scan_f32(const float * __restrict__ src0, const float * __restrict__ src1, const float * __restrict__ src2, | ||||||
| @@ -12,7 +7,9 @@ __global__ void __launch_bounds__(splitD, 2) | |||||||
|                  const int src0_nb1, const int src0_nb2, const int src1_nb0, const int src1_nb1, const int src1_nb2, |                  const int src0_nb1, const int src0_nb2, const int src1_nb0, const int src1_nb1, const int src1_nb2, | ||||||
|                  const int src1_nb3, const int src2_nb0, const int src2_nb1, const int src2_nb2, const int src3_nb1, |                  const int src1_nb3, const int src2_nb0, const int src2_nb1, const int src2_nb2, const int src3_nb1, | ||||||
|                  const int src4_nb1, const int src4_nb2, const int src5_nb1, const int src5_nb2, |                  const int src4_nb1, const int src4_nb2, const int src5_nb1, const int src5_nb2, | ||||||
|                  float * __restrict__ dst, const int D, const int L, const int B) { |                  float * __restrict__ dst, const int64_t L) { | ||||||
|  |     GGML_UNUSED(src1_nb0); | ||||||
|  |     GGML_UNUSED(src2_nb0); | ||||||
|     const int bidx = blockIdx.x;  // split along B |     const int bidx = blockIdx.x;  // split along B | ||||||
|     const int bidy = blockIdx.y;  // split along D |     const int bidy = blockIdx.y;  // split along D | ||||||
|     const int tid  = threadIdx.x; |     const int tid  = threadIdx.x; | ||||||
| @@ -25,12 +22,12 @@ __global__ void __launch_bounds__(splitD, 2) | |||||||
|     float *                 smem_A     = smem; |     float *                 smem_A     = smem; | ||||||
|     float *                 smem_s0    = smem_A + splitD * stride_sA; |     float *                 smem_s0    = smem_A + splitD * stride_sA; | ||||||
|  |  | ||||||
|     const float * s0_block = (const float *) ((char *) src0 + bidx * src0_nb2 + bidy * splitD * src0_nb1); |     const float * s0_block = (const float *) ((const char *) src0 + bidx * src0_nb2 + bidy * splitD * src0_nb1); | ||||||
|     const float * x_block  = (const float *) ((char *) src1 + (bidx * src1_nb2) + bidy * splitD * sizeof(float)); |     const float * x_block  = (const float *) ((const char *) src1 + (bidx * src1_nb2) + bidy * splitD * sizeof(float)); | ||||||
|     const float * dt_block = (const float *) ((char *) src2 + (bidx * src2_nb2) + bidy * splitD * sizeof(float)); |     const float * dt_block = (const float *) ((const char *) src2 + (bidx * src2_nb2) + bidy * splitD * sizeof(float)); | ||||||
|     const float * A_block  = (const float *) ((char *) src3 + bidy * splitD * src3_nb1); |     const float * A_block  = (const float *) ((const char *) src3 + bidy * splitD * src3_nb1); | ||||||
|     const float * B_block  = (const float *) ((char *) src4 + (bidx * src4_nb2)); |     const float * B_block  = (const float *) ((const char *) src4 + (bidx * src4_nb2)); | ||||||
|     const float * C_block  = (const float *) ((char *) src5 + (bidx * src5_nb2)); |     const float * C_block  = (const float *) ((const char *) src5 + (bidx * src5_nb2)); | ||||||
|     float *       y_block  = (float *) ((char *) dst + (bidx * src1_nb2) + bidy * splitD * sizeof(float)); |     float *       y_block  = (float *) ((char *) dst + (bidx * src1_nb2) + bidy * splitD * sizeof(float)); | ||||||
|     float *       s_block  = (float *) ((char *) dst + src1_nb3 + bidx * src0_nb2 + bidy * splitD * src0_nb1); |     float *       s_block  = (float *) ((char *) dst + src1_nb3 + bidx * src0_nb2 + bidy * splitD * src0_nb1); | ||||||
|  |  | ||||||
| @@ -46,7 +43,7 @@ __global__ void __launch_bounds__(splitD, 2) | |||||||
|     // can N not be 16? for example 32? |     // can N not be 16? for example 32? | ||||||
|     if (N == 16) { |     if (N == 16) { | ||||||
| #pragma unroll | #pragma unroll | ||||||
|         for (int i = 0; i < splitD / 4; i += 2) { |         for (size_t i = 0; i < splitD / 4; i += 2) { | ||||||
|             float value = A_block[(wid * warpSize + i) * stride_A + wtid]; |             float value = A_block[(wid * warpSize + i) * stride_A + wtid]; | ||||||
|             // todo: bank conflict |             // todo: bank conflict | ||||||
|             // I am always confused with how to use the swizzling method to solve |             // I am always confused with how to use the swizzling method to solve | ||||||
| @@ -54,7 +51,7 @@ __global__ void __launch_bounds__(splitD, 2) | |||||||
|             smem_A[(wid * warpSize + i) * stride_sA + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value; |             smem_A[(wid * warpSize + i) * stride_sA + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value; | ||||||
|         } |         } | ||||||
| #pragma unroll | #pragma unroll | ||||||
|         for (int i = 0; i < splitD / 4; i += 2) { |         for (size_t i = 0; i < splitD / 4; i += 2) { | ||||||
|             float value = s0_block[(wid * warpSize + i) * stride_s0 + wtid]; |             float value = s0_block[(wid * warpSize + i) * stride_s0 + wtid]; | ||||||
|             smem_s0[(wid * warpSize + i) * stride_ss0 + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value; |             smem_s0[(wid * warpSize + i) * stride_ss0 + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value; | ||||||
|         } |         } | ||||||
| @@ -62,7 +59,7 @@ __global__ void __launch_bounds__(splitD, 2) | |||||||
|  |  | ||||||
|     __syncthreads(); |     __syncthreads(); | ||||||
|  |  | ||||||
|     for (int i = 0; i < L; i++) { |     for (int64_t i = 0; i < L; i++) { | ||||||
|         float dt_soft_plus = dt_block[i * stride_dt + tid]; |         float dt_soft_plus = dt_block[i * stride_dt + tid]; | ||||||
|         if (dt_soft_plus <= 20.0f) { |         if (dt_soft_plus <= 20.0f) { | ||||||
|             dt_soft_plus = log1pf(exp(dt_soft_plus)); |             dt_soft_plus = log1pf(exp(dt_soft_plus)); | ||||||
| @@ -70,7 +67,7 @@ __global__ void __launch_bounds__(splitD, 2) | |||||||
|         float x_dt = x_block[i * stride_x + tid] * dt_soft_plus; |         float x_dt = x_block[i * stride_x + tid] * dt_soft_plus; | ||||||
|         float sumf = 0.0f; |         float sumf = 0.0f; | ||||||
| #pragma unroll | #pragma unroll | ||||||
|         for (int j = 0; j < N; j++) { |         for (size_t j = 0; j < N; j++) { | ||||||
|             float state = (smem_s0[tid * stride_ss0 + j] * expf(dt_soft_plus * smem_A[tid * stride_sA + j])) + |             float state = (smem_s0[tid * stride_ss0 + j] * expf(dt_soft_plus * smem_A[tid * stride_sA + j])) + | ||||||
|                           (B_block[i * stride_B + j] * x_dt); |                           (B_block[i * stride_B + j] * x_dt); | ||||||
|             sumf += state * C_block[i * stride_C + j]; |             sumf += state * C_block[i * stride_C + j]; | ||||||
| @@ -90,7 +87,8 @@ static void ssm_scan_f32_cuda(const float * src0, const float * src1, const floa | |||||||
|                               const int src1_nb0, const int src1_nb1, const int src1_nb2, const int src1_nb3, |                               const int src1_nb0, const int src1_nb1, const int src1_nb2, const int src1_nb3, | ||||||
|                               const int src2_nb0, const int src2_nb1, const int src2_nb2, const int src3_nb1, |                               const int src2_nb0, const int src2_nb1, const int src2_nb2, const int src3_nb1, | ||||||
|                               const int src4_nb1, const int src4_nb2, const int src5_nb1, const int src5_nb2, |                               const int src4_nb1, const int src4_nb2, const int src5_nb1, const int src5_nb2, | ||||||
|                               float * dst, const int N, const int D, const int L, const int B, cudaStream_t stream) { |                               float * dst, const int64_t N, const int64_t D, const int64_t L, const int64_t B, | ||||||
|  |                               cudaStream_t stream) { | ||||||
|     const int threads = 128; |     const int threads = 128; | ||||||
|     // todo: consider D cannot be divided,does this situation exist? |     // todo: consider D cannot be divided,does this situation exist? | ||||||
|     GGML_ASSERT(D % threads == 0); |     GGML_ASSERT(D % threads == 0); | ||||||
| @@ -99,7 +97,7 @@ static void ssm_scan_f32_cuda(const float * src0, const float * src1, const floa | |||||||
|     if (N == 16) { |     if (N == 16) { | ||||||
|         ssm_scan_f32<128, 16><<<blocks, threads, smem_size, stream>>>( |         ssm_scan_f32<128, 16><<<blocks, threads, smem_size, stream>>>( | ||||||
|             src0, src1, src2, src3, src4, src5, src0_nb1, src0_nb2, src1_nb0, src1_nb1, src1_nb2, src1_nb3, src2_nb0, |             src0, src1, src2, src3, src4, src5, src0_nb1, src0_nb2, src1_nb0, src1_nb1, src1_nb2, src1_nb3, src2_nb0, | ||||||
|             src2_nb1, src2_nb2, src3_nb1, src4_nb1, src4_nb2, src5_nb1, src5_nb2, dst, D, L, B); |             src2_nb1, src2_nb2, src3_nb1, src4_nb1, src4_nb2, src5_nb1, src5_nb2, dst, L); | ||||||
|     } else { |     } else { | ||||||
|         GGML_ABORT("doesn't support N!=16."); |         GGML_ABORT("doesn't support N!=16."); | ||||||
|     } |     } | ||||||
|   | |||||||
		Reference in New Issue
	
	Block a user