Mirror of https://github.com/ggml-org/llama.cpp.git
(synced 2025-10-31 08:51:55 +00:00)

llama : fix build_ffn without gate
This commit is contained in:
@@ -782,7 +782,7 @@ ggml_tensor * llm_graph_context::build_ffn(
             } break;
     }
 
-    if (type_gate == LLM_FFN_PAR) {
+    if (gate && type_gate == LLM_FFN_PAR) {
         cur = ggml_mul(ctx0, cur, tmp);
         cb(cur, "ffn_gate_par", il);
     }
		Reference in New Issue
	
	Block a user
Xuan Son Nguyen