Mirror of https://github.com/ggml-org/llama.cpp.git
	cuda : fix unused variable compile warning (whisper/0)
ggml-ci
@@ -592,6 +592,8 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg
         dest_ptrs_d = ctx.cuda_graph->dest_ptrs_d;
         graph_cpynode_index = ctx.cuda_graph->graph_cpynode_index;
     }
+#else
+    GGML_UNUSED(disable_indirection_for_this_node);
 #endif
     if (src0->type == src1->type && ggml_is_contiguous(src0) && ggml_is_contiguous(src1)) {
         GGML_ASSERT(ggml_nbytes(src0) == ggml_nbytes(src1));
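
The warning being fixed comes from builds where the CUDA/HIP graphs path is compiled out: in that configuration disable_indirection_for_this_node is never read, and the added GGML_UNUSED line marks it as intentionally unused. Below is a minimal, self-contained sketch of that pattern. The GGML_UNUSED definition mirrors the usual (void)(x) idiom, and the USE_GRAPHS_DEMO flag and copy_node function are illustrative stand-ins rather than the actual ggml code.

// Minimal sketch of the warning-suppression pattern from this commit.
// Assumptions: the GGML_UNUSED definition below follows the common (void)(x)
// idiom; USE_GRAPHS_DEMO and copy_node stand in for GGML_CUDA_USE_GRAPHS and
// ggml_cuda_cpy and are not the real ggml code.
#include <cstdio>

#define GGML_UNUSED(x) (void)(x)   // evaluate x as void so the compiler sees a use

// Toggle to emulate a build with or without the graphs code path.
// #define USE_GRAPHS_DEMO

static void copy_node(bool disable_indirection_for_this_node) {
#if defined(USE_GRAPHS_DEMO)
    // Graphs path: the parameter is genuinely read here.
    if (!disable_indirection_for_this_node) {
        std::printf("recording copy node with indirection\n");
    }
#else
    // Non-graphs path: the parameter is never read, so an unused
    // variable/parameter warning would fire; GGML_UNUSED silences it
    // without changing behavior.
    GGML_UNUSED(disable_indirection_for_this_node);
#endif
}

int main() {
    copy_node(true);
    return 0;
}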