	Fix nasty bug in ggml_compute_forward_mul_mat_f32() and reenable BLAS
@@ -917,8 +917,7 @@ static bool llama_eval_internal(
             struct ggml_tensor * KQ_scaled =
                 ggml_scale(ctx0,
                         KQ,
-                        ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
-                        );
+                        ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head)));
 
             // KQ_masked = mask_past(KQ_scaled)
             struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
@@ -934,7 +933,7 @@
                                 ggml_view_1d(ctx0, kv_self.v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(kv_self.v)*n_embd),
                                 n_embd/n_head, n_head, n_past + N),
                             1, 2, 0, 3),
-                    ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head));
+                    ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd/n_head, n_head));
 
             // KQV = transpose(V) * KQ_soft_max
             struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max);
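The first hunk is purely cosmetic: it folds a dangling closing parenthesis into the previous line. The expression it touches is the standard scaled-dot-product attention factor 1/sqrt(d_head), with d_head = n_embd/n_head. A minimal standalone sketch of that arithmetic, not part of the commit, with LLaMA-7B shapes assumed for illustration:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        const int n_embd = 4096;  /* assumed: LLaMA-7B embedding width */
        const int n_head = 32;    /* assumed: LLaMA-7B attention heads */

        /* same expression as the diff: 1.0f/sqrt(float(n_embd)/n_head) */
        const float d_head = (float) n_embd / n_head;  /* 128 */
        const float scale  = 1.0f / sqrtf(d_head);     /* ~0.088388 */

        printf("d_head = %.0f, scale = %f\n", d_head, scale);
        return 0;
    }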
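The substantive change is the second hunk: the destination tensor for the V-cache copy now takes its type from kv_self.v->type instead of hard-coding GGML_TYPE_F32. Since ggml_cpy converts data to its destination's type, the hard-coded F32 destination would silently widen an F16 cache, changing element sizes on the path into ggml_mul_mat. A minimal sketch of that size mismatch, assuming an F16 cache; the tensor names and dimensions below are illustrative, not from the commit:

    #include <stdio.h>
    #include "ggml.h"

    int main(void) {
        struct ggml_init_params params = {
            .mem_size   = 16*1024*1024,  /* scratch size chosen arbitrarily */
            .mem_buffer = NULL,
        };
        struct ggml_context * ctx = ggml_init(params);

        /* an F16 stand-in for the V cache, plus two candidate copy destinations */
        struct ggml_tensor * v_cache  = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, 8);
        struct ggml_tensor * dst_f32  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
        struct ggml_tensor * dst_same = ggml_new_tensor_1d(ctx, v_cache->type, 8);

        /* 2 bytes/element for the cache and the matched destination, 4 for F32 */
        printf("cache: %zu, hard-coded F32 dst: %zu, matched dst: %zu\n",
               ggml_element_size(v_cache),
               ggml_element_size(dst_f32),
               ggml_element_size(dst_same));

        ggml_free(ctx);
        return 0;
    }

Matching the cache type keeps V in its stored precision end to end, which is presumably what the fixed ggml_compute_forward_mul_mat_f32() path and the re-enabled BLAS route expect.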
Georgi Gerganov