mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-29 08:41:22 +00:00 
			
		
		
		
	ggml : add comment about backward GGML_OP_DIAG_MASK_INF (#4203)
This commit is contained in:
		
							
								
								
									
										2
									
								
								ggml.c
									
									
									
									
									
								
							
							
						
						
									
										2
									
								
								ggml.c
									
									
									
									
									
								
@@ -15335,6 +15335,8 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
                     const int n_past = ((int32_t *) tensor->op_params)[0];
                     src0->grad =
                         ggml_add_or_set(ctx, src0->grad,
+                            /* ggml_diag_mask_inf_impl() shouldn't be here */
+                            /* ref:  https://github.com/ggerganov/llama.cpp/pull/4203#discussion_r1412377992 */
                             ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
                         zero_table);
                 }
		Reference in New Issue
	
	Block a user
	 Herman Semenov