mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-11-03 09:22:01 +00:00 
			
		
		
		
	ggml : fix multi-threaded clamp_f32 (#11824)
* Bug fix for clamp_f32: when using tensors larger than 1-D, the clamp operation does not work, due to the restriction of returning early when `ith` is not 0. * Bug fix for clamp_f32 * Bug fix for clamp_f32
This commit is contained in:
		@@ -9074,10 +9074,6 @@ static void ggml_compute_forward_clamp_f32(
 | 
			
		||||
 | 
			
		||||
    const struct ggml_tensor * src0 = dst->src[0];
 | 
			
		||||
 | 
			
		||||
    if (params->ith != 0) {
 | 
			
		||||
        return;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    float min;
 | 
			
		||||
    float max;
 | 
			
		||||
    memcpy(&min, (float *) dst->op_params + 0, sizeof(float));
 | 
			
		||||
 
 | 
			
		||||
		Reference in New Issue
	
	Block a user