Remove unused MAMBA_CHUNK_SIZE key (converter, llm_kv enum, and hparams loading)

This commit is contained in:
ibrahimkhadraoui
2025-07-07 15:29:56 +04:00
parent 0ad3502839
commit 53446f7e42
3 changed files with 0 additions and 3 deletions

View File

@@ -6672,7 +6672,6 @@ class FalconH1Model(Mamba2Model):
# Add Falcon Mamba2 specific configuration
self.gguf_writer.add_uint32("falcon_h1.ssm.mamba_chunk_size", self.hparams["mamba_chunk_size"])
self.gguf_writer.add_uint32("falcon_h1.attention.head_dim", self.hparams["head_dim"])
self.gguf_writer.add_uint32("falcon_h1.ssm.mamba_d_ssm", self.hparams["mamba_d_ssm"])
self.gguf_writer.add_uint32("falcon_h1.num_attention_heads", self.find_hparam(["num_attention_heads"]))

View File

@@ -165,7 +165,6 @@ enum llm_kv {
LLM_KV_FALCON_H1_USE_MLP,
LLM_KV_FALCON_H1_MAMBA_NORM_BEFORE_GATE,
LLM_KV_FALCON_H1_MAMBA_RMS_NORM,
LLM_KV_FALCON_H1_MAMBA_CHUNK_SIZE,
LLM_KV_ROPE_DIMENSION_COUNT,
LLM_KV_ROPE_DIMENSION_SECTIONS,

View File

@@ -1563,7 +1563,6 @@ void llama_model::load_hparams(llama_model_loader & ml) {
ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
ml.get_key(LLM_KV_SSM_GROUP_COUNT, hparams.ssm_n_group);
ml.get_key(LLM_KV_SSM_HEAD_DIM, hparams.ssm_head_dim);
ml.get_key(LLM_KV_FALCON_H1_MAMBA_CHUNK_SIZE, hparams.chunk_size);
// Falcon-H1 parameters
ml.get_key(LLM_KV_ATTN_HEAD_DIM, hparams.attn_head_dim);