	CUDA: add unused vars to mmvf and mmvq (#16807)
@@ -343,6 +343,10 @@ static __global__ void mul_mat_vec_f(
     }
 
     dst[tid*stride_col_dst + row] = value;
+
+    if constexpr (!has_fusion) {
+        GGML_UNUSED_VARS(use_gate, use_bias, use_gate_bias, glu_op, gate_x, x_bias, gate_bias, sumf_gate);
+    }
 }
 
 template<typename T, typename type_acc, int ncols_dst, int block_size>
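
The added block in this first hunk routes the fusion-only variables through GGML_UNUSED_VARS, presumably so the has_fusion == false instantiation of mul_mat_vec_f compiles without unused-variable warnings. As a rough sketch of how such a variadic helper can be built (an illustration under that assumption, not ggml's actual definition of GGML_UNUSED_VARS, which may differ):

// Sketch only: a variadic "mark as used" helper. ggml's real
// GGML_UNUSED_VARS macro may be implemented differently.
template <typename... Args>
__host__ __device__ static inline void sketch_unused_vars(const Args &...) {
    // Binding every argument to an unnamed reference parameter counts as a
    // use at the call site, silences the warning, and the empty body
    // optimizes away to nothing.
}

#define SKETCH_UNUSED_VARS(...) sketch_unused_vars(__VA_ARGS__)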
@@ -310,6 +310,10 @@ static __global__ void mul_mat_vec_q(
             dst[j*stride_col_dst + threadIdx.x] = result;
         }
     }
+
+    if constexpr (!has_fusion) {
+        GGML_UNUSED_VARS(use_gate, use_bias, use_gate_bias, active_glu, gate_bias, x_bias, tmp_gate);
+    }
 }
 
 static std::pair<dim3, dim3> calc_launch_params(
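
Both hunks apply the same pattern: the kernels carry a compile-time has_fusion switch, and in the non-fused instantiation the gate/bias values are apparently computed or passed in but never read, which some compilers flag. A self-contained sketch of that pattern with hypothetical names (fused_scale, gate_value; not the real mul_mat_vec_f/mul_mat_vec_q code):

#include <cuda_runtime.h>

// Same illustrative helper as above; ggml's real GGML_UNUSED_VARS may differ.
template <typename... Args>
__host__ __device__ static void sketch_unused_vars(const Args &...) {}

template <bool has_fusion>
__global__ void fused_scale(float * dst, const float * x, const float * gate, const int n) {
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= n) {
        return;
    }

    // Computed unconditionally, but only consumed by the fused variant below.
    const float gate_value = gate ? gate[i] : 1.0f;

    float value = 2.0f*x[i];
    if constexpr (has_fusion) {
        value *= gate_value; // fused epilogue, only in the true instantiation
    }
    dst[i] = value;

    if constexpr (!has_fusion) {
        // Here gate_value is never read, so some compilers warn about an
        // unused variable; routing it through the helper keeps the build
        // quiet without changing behavior.
        sketch_unused_vars(gate_value);
    }
}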