	llama : fix MiniCPM (#5392)
* fix bug: norm_rms_eps was missing
* write model tensors in the same order as convert.py
* fix: undo the permute of HF model tensors
* update for flake8 lint
@@ -2947,6 +2947,8 @@ static void llm_load_hparams(
             } break;
         case LLM_ARCH_MINICPM:
             {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
                 switch (hparams.n_layer) {
                     case 40: model.type = e_model::MODEL_2B; break;
                     default: model.type = e_model::MODEL_UNKNOWN;
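The added ml.get_key call loads the RMS-norm epsilon from the model's GGUF metadata into hparams.f_norm_rms_eps; before this change the value was never read for MiniCPM, which is the bug named in the first bullet. As a rough illustration of where that epsilon enters (a minimal standalone C++ sketch, not the actual llama.cpp graph code), RMS normalization divides each activation vector by its root mean square, and eps keeps the denominator away from zero:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Minimal RMS-norm sketch: out[i] = x[i] / sqrt(mean(x^2) + eps).
    // The learned per-channel scale that real models apply afterwards is omitted.
    std::vector<float> rms_norm(const std::vector<float> & x, float eps) {
        double sum_sq = 0.0;
        for (float v : x) {
            sum_sq += (double) v * v;
        }
        const float inv_rms = 1.0f / std::sqrt((float)(sum_sq / x.size()) + eps);
        std::vector<float> out(x.size());
        for (size_t i = 0; i < x.size(); ++i) {
            out[i] = x[i] * inv_rms;
        }
        return out;
    }

Typical epsilon values for LLaMA-style models are around 1e-5 or 1e-6, but the authoritative value is whatever the conversion script wrote into the GGUF file, which is exactly what the new ml.get_key line reads.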
runfuture