ggml : add asserts (#14720)

* ggml : add asserts

ggml-ci

* cont : fix constant type

Author: Georgi Gerganov
Co-authored-by: Diego Devesa <slarengh@gmail.com>
@@ -4015,6 +4015,9 @@ static void ggml_compute_forward_rms_norm_f32(
 
                 const float scale = 1.0f/sqrtf(mean + eps);
 
+                // if you hit this, likely you got an inf somewhere earlier
+                assert(scale > 0.0f);
+
                 ggml_vec_scale_f32(ne00, y, scale);
             }
         }
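For context, a minimal standalone sketch (mine, not part of the patch) of why this assert catches upstream problems: once an inf reaches the RMS-norm mean, 1.0f/sqrtf(inf + eps) collapses to 0.0f, and a NaN mean fails the comparison too, so scale > 0.0f only holds for a healthy finite mean. The toy main and sample values below are illustrative assumptions.

#include <assert.h>
#include <math.h>
#include <stdio.h>

int main(void) {
    const float eps = 1e-6f;

    // healthy case: a finite, non-negative mean gives a positive scale
    float scale = 1.0f/sqrtf(4.0f + eps);
    assert(scale > 0.0f);                        // holds

    // an inf propagated from an earlier op collapses the scale to zero
    scale = 1.0f/sqrtf(INFINITY + eps);
    printf("inf mean -> scale = %f\n", scale);   // prints 0.000000

    // a NaN mean yields a NaN scale, and (NaN > 0.0f) is false
    scale = 1.0f/sqrtf(NAN + eps);
    printf("nan mean -> scale = %f\n", scale);   // prints nan

    // in either bad case the patched assert would fire at this point
    return 0;
}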
@@ -221,6 +221,9 @@ void ggml_vec_dot_f16(int n, float * GGML_RESTRICT s, size_t bs, ggml_fp16_t * G
     for (int i = np; i < n; ++i) {
         sumf += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[i])*GGML_CPU_FP16_TO_FP32(y[i]));
     }
+
+    // if you hit this, you are likely running outside the FP range
+    assert(!isnan(sumf) && !isinf(sumf));
 #else
     for (int i = 0; i < n; ++i) {
         sumf += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[i])*GGML_CPU_FP16_TO_FP32(y[i]));
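Similarly, a minimal sketch (again mine, not from the patch) of what the second assert guards against: fp16 tops out near 65504, so an element that overflowed upstream arrives as inf after widening to fp32 and poisons the whole dot product. The typedef and input values below are stand-ins for ggml's accumulator type and already-converted fp16 data.

#include <assert.h>
#include <math.h>
#include <stdio.h>

typedef double ggml_float;   // assumed stand-in for ggml's wide accumulator

int main(void) {
    // pretend these are fp16 inputs already widened to fp32; the inf
    // models a value that exceeded fp16's ~65504 max somewhere upstream
    const float x[4] = { 1.0f, 2.0f, INFINITY, 4.0f };
    const float y[4] = { 1.0f, 1.0f, 1.0f,     1.0f };

    ggml_float sumf = 0.0;
    for (int i = 0; i < 4; ++i) {
        sumf += (ggml_float)(x[i]*y[i]);
    }

    printf("sumf = %f\n", (double)sumf);    // inf: one bad element is enough
    assert(!isnan(sumf) && !isinf(sumf));   // fires, as in the patch
    return 0;
}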