another fix

This commit is contained in:
younesbelkada
2025-07-03 15:08:23 +04:00
parent f897efdaf6
commit 71a6848e2d

View File

@@ -1582,6 +1582,8 @@ void llama_model::load_hparams(llama_model_loader & ml) {
     ml.get_key(LLM_KV_FALCON_H1_LM_HEAD_MULTIPLIER, hparams.lm_head_multiplier);
     ml.get_key(LLM_KV_FALCON_H1_EMBEDDING_MULTIPLIER, hparams.embedding_multiplier);
+    std::fill(hparams.recurrent_layer_arr.begin(), hparams.recurrent_layer_arr.end(), true);
     switch (hparams.n_layer) {
         case 36:
             type = LLM_TYPE_0_5B; break;