Mirror of https://github.com/ggml-org/llama.cpp.git
(synced 2025-11-03 09:22:01 +00:00)

Commit: CUDA: fix padding logic for FP16/FP32 (#8884)
This commit is contained in:
@@ -1501,7 +1501,7 @@ static void ggml_cuda_op_mul_mat(
         }

         // If src0 is on a temporary compute buffers (partial offloading) there may be some padding that needs to be cleared:
-        if (ne00 % MATRIX_ROW_PADDING != 0 && ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE && src0->view_src == nullptr) {
+        if (ne00 % MATRIX_ROW_PADDING != 0 && ggml_is_quantized(src0->type) && ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE && src0->view_src == nullptr) {
             const int64_t nbytes_data    = ggml_row_size(src0->type, (dev[id].row_high - dev[id].row_low)*ne00);
             const int64_t nbytes_padding = ggml_row_size(src0->type, MATRIX_ROW_PADDING - ne00 % MATRIX_ROW_PADDING);
             CUDA_CHECK(cudaMemsetAsync(dev[id].src0_dd + nbytes_data , 0, nbytes_padding, stream));
		Reference in New Issue
	
	Block a user