model: Add support for CogVLM model (#15002)

* Added GGUF mappings for CogVLM model

* Add tensor mapping for CogVLM visual encoder

* Add CogVLM to conversion script, no vision part yet

* Added CogVLM vision model to conversion script

* Add graph for CogVLM CLIP model

* Add graph for CogVLM

* Fixes for CogVLM. Now compiles.

* Model now runs

* Fixes for CogVLM graph

* Account for graph context change after rebase

* Whitespace fixes

* Update conversion script per review comments

* Switch CogVLM LLM graph to merged QKV tensor (see the sketch after the commit metadata below)

* Use rope_type variable instead of defining it inline

* Change CogVLM CLIP encoder to use SwiGLU

* Switch CogVLM CLIP to use merged QKV

* Apply rebase edits and remove ggml_cont call that is now unnecessary

* Clean up

---------

Co-authored-by: Xuan Son Nguyen <son@huggingface.co>
Author:    Tianyue-Zhao
Date:      2025-10-30 07:18:50 -04:00
Committer: GitHub
Parent:    229bf68628
Commit:    bacddc049a

9 changed files with 501 additions and 26 deletions
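
Two of the bullets above switch the LLM and CLIP graphs to a merged QKV tensor. As a rough illustration only (NumPy with illustrative shapes, not the ggml graph code from this commit), the idea is to run a single fused projection and slice the result apart:

```python
import numpy as np

n_embd, n_tokens = 4096, 8                    # illustrative sizes, not CogVLM's config
x = np.random.randn(n_tokens, n_embd)
w_qkv = np.random.randn(n_embd, 3 * n_embd)   # one merged weight instead of three

qkv = x @ w_qkv                               # single matmul produces Q, K and V
q, k, v = np.split(qkv, 3, axis=-1)           # slice the fused result into three parts
assert q.shape == k.shape == v.shape == (n_tokens, n_embd)
```

This is why the mappings below point the QKV tensors at `language_expert_query_key_value` and `vision_expert_query_key_value` rather than at separate q/k/v projections.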


@@ -104,6 +104,7 @@ class TensorNameMap:
"backbone.final_layer_norm", # wavtokenizer
"model.norm", # llama4
"model.transformer.ln_f", # llada
"model.norm", # cogvlm
),
# Rope frequencies
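
For readers of these hunks, a hedged usage sketch of how the conversion script consumes the map (the `get_tensor_name_map`/`get_name` API exists in gguf-py; the `MODEL_ARCH.COGVLM` value and block count are assumptions based on the rest of this PR):

```python
import gguf

# build the HF-name -> GGUF-name map for the architecture being converted
tmap = gguf.get_tensor_name_map(gguf.MODEL_ARCH.COGVLM, n_blocks=32)

# "model.norm" is the entry added in this hunk; it should resolve to the
# GGUF output-norm tensor name, e.g. "output_norm.weight"
print(tmap.get_name("model.norm.weight", try_suffixes=(".weight", ".bias")))
```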
@@ -162,6 +163,7 @@ class TensorNameMap:
"encoder.layer.{bid}.layer_norm_1", # jina-v2-code
"rwkv.blocks.{bid}.ln2", # rwkv6
"model.layers.{bid}.ln2", # rwkv7
"model.layers.{bid}.post_attention_layernorm", # cogvlm
),
# Attention query-key-value
@@ -184,6 +186,7 @@ class TensorNameMap:
"encoder.layers.{bid}.self_attention.query_key_value", # chatglm
"transformer.layers.{bid}.attn.qkv_proj", # openelm
"transformer_encoder.{bid}.qkv", # neobert
"model.layers.{bid}.self_attn.language_expert_query_key_value", # cogvlm
),
# Attention query
@@ -279,6 +282,7 @@ class TensorNameMap:
"model.transformer.blocks.{bid}.attn_out", # llada
"layers.{bid}.self_attn.o_proj", # qwen3-embedding
"backbone.layers.{bid}.mixer.o_proj", # nemotron-h
"model.layers.{bid}.self_attn.language_expert_dense", # cogvlm
),
# Attention output norm
@@ -418,6 +422,7 @@ class TensorNameMap:
"model.transformer.blocks.{bid}.up_proj", # llada
"layers.{bid}.mlp.up_proj", # qwen3-embedding
"backbone.layers.{bid}.mixer.up_proj", # nemotron-h
"model.layers.{bid}.mlp.language_mlp.up_proj", # cogvlm
),
MODEL_TENSOR.FFN_UP_EXP: (
@@ -450,21 +455,22 @@ class TensorNameMap:
# Feed-forward gate
MODEL_TENSOR.FFN_GATE: (
"model.layers.{bid}.mlp.gate_proj", # llama-hf refact olmo2
"layers.{bid}.mlp.gate_proj", # embeddinggemma
"layers.{bid}.feed_forward.w1", # llama-pth
"transformer.h.{bid}.mlp.w2", # qwen
"transformer.h.{bid}.mlp.c_fc2", # jais
"model.layers.layers.{bid}.mlp.gate_proj", # plamo
"model.layers.{bid}.feed_forward.w1", # internlm2
"encoder.layers.{bid}.mlp.fc12", # nomic-bert
"encoder.layer.{bid}.mlp.gated_layers_w", # jina-bert-v2 (split up/gate, no longer used)
"transformer.h.{bid}.mlp.linear_1", # refact
"model.layers.{bid}.residual_mlp.w1", # arctic
"transformer.h.{bid}.mlp.c_fc_0", # exaone
"model.layers.{bid}.feed_forward.gate_proj", # llama4 jamba granite-hybrid
"model.transformer.blocks.{bid}.ff_proj", # llada
"layers.{bid}.mlp.gate_proj", # qwen3-embedding
"model.layers.{bid}.mlp.gate_proj", # llama-hf refact olmo2
"layers.{bid}.mlp.gate_proj", # embeddinggemma
"layers.{bid}.feed_forward.w1", # llama-pth
"transformer.h.{bid}.mlp.w2", # qwen
"transformer.h.{bid}.mlp.c_fc2", # jais
"model.layers.layers.{bid}.mlp.gate_proj", # plamo
"model.layers.{bid}.feed_forward.w1", # internlm2
"encoder.layers.{bid}.mlp.fc12", # nomic-bert
"encoder.layer.{bid}.mlp.gated_layers_w", # jina-bert-v2 (split up/gate, no longer used)
"transformer.h.{bid}.mlp.linear_1", # refact
"model.layers.{bid}.residual_mlp.w1", # arctic
"transformer.h.{bid}.mlp.c_fc_0", # exaone
"model.layers.{bid}.feed_forward.gate_proj", # llama4 jamba granite-hybrid
"model.transformer.blocks.{bid}.ff_proj", # llada
"layers.{bid}.mlp.gate_proj", # qwen3-embedding
"model.layers.{bid}.mlp.language_mlp.gate_proj", # cogvlm
),
MODEL_TENSOR.FFN_GATE_EXP: (
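
The gate/up/down triple registered above is what a SwiGLU-style FFN consumes, matching the "use SwiGLU" bullet in the commit message. A minimal sketch (NumPy, not the ggml graph code):

```python
import numpy as np

def silu(x):
    return x / (1.0 + np.exp(-x))

def swiglu_ffn(x, w_gate, w_up, w_down):
    # FFN(x) = down( silu(gate(x)) * up(x) )
    return (silu(x @ w_gate) * (x @ w_up)) @ w_down
```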
@@ -522,6 +528,7 @@ class TensorNameMap:
"model.transformer.blocks.{bid}.ff_out", # llada
"layers.{bid}.mlp.down_proj", # qwen3-embedding
"backbone.layers.{bid}.mixer.down_proj", # nemotron-h
"model.layers.{bid}.mlp.language_mlp.down_proj", # cogvlm
),
MODEL_TENSOR.FFN_DOWN_EXP: (
@@ -1047,6 +1054,26 @@ class TensorNameMap:
"encoder.block.{bid}.layer.1.DenseReluDense.wo", # t5
),
MODEL_TENSOR.VISEXP_UP: (
"model.layers.{bid}.mlp.vision_mlp.up_proj", # cogvlm
),
MODEL_TENSOR.VISEXP_GATE: (
"model.layers.{bid}.mlp.vision_mlp.gate_proj", # cogvlm
),
MODEL_TENSOR.VISEXP_DOWN: (
"model.layers.{bid}.mlp.vision_mlp.down_proj", # cogvlm
),
MODEL_TENSOR.VISEXP_ATTN_OUT: (
"model.layers.{bid}.self_attn.vision_expert_dense", # cogvlm
),
MODEL_TENSOR.VISEXP_ATTN_QKV: (
"model.layers.{bid}.self_attn.vision_expert_query_key_value", # cogvlm
),
############################################################################
# TODO: these do not belong to block_mappings_cfg - move them to mappings_cfg
MODEL_TENSOR.ENC_OUTPUT_NORM: (
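
The new `VISEXP_*` tensors reflect CogVLM's dual-expert layers: each decoder layer carries a second set of attention/FFN weights that only image-token positions pass through. A minimal routing sketch (NumPy; the boolean-mask scheme is an assumption for illustration, not the graph code from this commit):

```python
import numpy as np

def dual_expert(x, is_image, lang_fn, vis_fn):
    # x: (n_tokens, n_embd); is_image: boolean mask over token positions
    out = np.empty_like(x)
    out[~is_image] = lang_fn(x[~is_image])   # language_expert_* weights
    out[is_image]  = vis_fn(x[is_image])     # vision_expert_* weights
    return out
```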
@@ -1148,6 +1175,7 @@ class TensorNameMap:
MODEL_TENSOR.V_MMPROJ_FC: (
"model.connector.modality_projection.proj", # SmolVLM
"model.vision.linear_proj.linear_proj", # cogvlm
),
MODEL_TENSOR.V_MMPROJ_MLP: (
@@ -1164,6 +1192,7 @@ class TensorNameMap:
"vision_tower.vision_model.embeddings.class_embedding",
"model.vision_tower.embeddings.cls_token", # Intern-S1
"vision_model.class_embedding", # llama 4
"model.vision.patch_embedding.cls_embedding", # cogvlm
),
MODEL_TENSOR.V_ENC_EMBD_PATCH: (
@@ -1176,6 +1205,7 @@ class TensorNameMap:
"vision_model.patch_embedding.linear", # llama 4
"visual.patch_embed.proj", # qwen2vl
"vision_tower.patch_embed.proj", # kimi-vl
"model.vision.patch_embedding.proj", # cogvlm
),
MODEL_TENSOR.V_ENC_EMBD_POS: (
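
A hedged sketch of the ViT-style embedding these `V_ENC_EMBD_*` tensors feed: a patch projection, a prepended CLS token, then learned position embeddings. All shapes are illustrative, not CogVLM's actual vision config:

```python
import numpy as np

patch, n_embd, img = 14, 1792, 224
n_patches = (img // patch) ** 2

pixels = np.random.randn(img, img, 3)
w_proj = np.random.randn(patch * patch * 3, n_embd)   # patch_embedding.proj
cls = np.random.randn(1, n_embd)                      # patch_embedding.cls_embedding
pos = np.random.randn(n_patches + 1, n_embd)          # position_embedding

# split the image into non-overlapping patches and flatten each one
patches = pixels.reshape(img // patch, patch, img // patch, patch, 3)
patches = patches.transpose(0, 2, 1, 3, 4).reshape(n_patches, -1)

tokens = np.concatenate([cls, patches @ w_proj]) + pos   # (n_patches + 1, n_embd)
```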
@@ -1185,6 +1215,11 @@ class TensorNameMap:
"model.vision_model.embeddings.position_embedding", # SmolVLM
"vision_model.positional_embedding_vlm", # llama 4
"vision_tower.patch_embed.pos_emb", # kimi-vl
"model.vision.patch_embedding.position_embedding", # cogvlm
),
MODEL_TENSOR.V_ENC_ATTN_QKV: (
"model.vision.transformer.layers.{bid}.attention.query_key_value", # cogvlm
),
MODEL_TENSOR.V_ENC_ATTN_Q: (
@@ -1244,6 +1279,7 @@ class TensorNameMap:
"vision_model.model.layers.{bid}.input_layernorm", # llama4
"visual.blocks.{bid}.norm1", # qwen2vl
"vision_tower.encoder.blocks.{bid}.norm0", # kimi-vl (norm0/norm1)
"model.vision.transformer.layers.{bid}.input_layernorm", # cogvlm
),
MODEL_TENSOR.V_ENC_ATTN_O: (
@@ -1257,6 +1293,7 @@ class TensorNameMap:
"vision_encoder.transformer.layers.{bid}.attention.wo", # pixtral
"visual.blocks.{bid}.attn.proj", # qwen2vl
"vision_tower.encoder.blocks.{bid}.wo", # kimi-vl
"model.vision.transformer.layers.{bid}.attention.dense", # cogvlm
),
MODEL_TENSOR.V_ENC_POST_ATTN_NORM: (
@@ -1270,6 +1307,7 @@ class TensorNameMap:
"vision_encoder.transformer.layers.{bid}.ffn_norm", # pixtral
"visual.blocks.{bid}.norm2", # qwen2vl
"vision_tower.encoder.blocks.{bid}.norm1", # kimi-vl (norm0/norm1)
"model.vision.transformer.layers.{bid}.post_attention_layernorm", # cogvlm
),
MODEL_TENSOR.V_ENC_FFN_UP: (
@@ -1283,6 +1321,7 @@ class TensorNameMap:
"visual.blocks.{bid}.mlp.fc1", # qwen2vl
"visual.blocks.{bid}.mlp.up_proj", # qwen2.5vl
"vision_tower.encoder.blocks.{bid}.mlp.fc0", # kimi-vl (fc0/fc1)
"model.vision.transformer.layers.{bid}.mlp.fc1", # cogvlm
),
MODEL_TENSOR.V_ENC_FFN_GATE: (
@@ -1302,6 +1341,7 @@ class TensorNameMap:
"visual.blocks.{bid}.mlp.fc2", # qwen2vl
"visual.blocks.{bid}.mlp.down_proj", # qwen2.5vl
"vision_tower.encoder.blocks.{bid}.mlp.fc1", # kimi-vl (fc0/fc1)
"model.vision.transformer.layers.{bid}.mlp.fc2", # cogvlm
),
MODEL_TENSOR.V_LAYER_SCALE_1: (
@@ -1338,6 +1378,7 @@ class TensorNameMap:
"multi_modal_projector.layer_norm",
"multi_modal_projector.pre_norm",
"pre_mm_projector_norm",
"model.vision.linear_proj.norm1", # cogvlm
),
MODEL_TENSOR.V_MM_SOFT_EMB_NORM: (
@@ -1397,6 +1438,30 @@ class TensorNameMap:
"patch_merger.merging_layer", # mistral
),
MODEL_TENSOR.V_MM_POST_FC_NORM: (
"model.vision.linear_proj.norm1", # cogvlm
),
MODEL_TENSOR.V_MM_UP: (
"model.vision.linear_proj.dense_h_to_4h", # cogvlm
),
MODEL_TENSOR.V_MM_DOWN: (
"model.vision.linear_proj.dense_4h_to_h", # cogvlm
),
MODEL_TENSOR.V_MM_GATE: (
"model.vision.linear_proj.gate_proj", # cogvlm
),
MODEL_TENSOR.V_TOK_BOI: (
"model.vision.boi", # cogvlm
),
MODEL_TENSOR.V_TOK_EOI: (
"model.vision.eoi", # cogvlm
),
# audio (mtmd)
MODEL_TENSOR.A_ENC_EMBD_POS: (
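
Putting the connector tensors above together, a rough end-to-end sketch of what the `V_MM_*` and `V_TOK_*` mappings describe: project the vision features, normalize, run a SwiGLU-style MLP, then bracket the result with the learned BOI/EOI embeddings. Function names and the plain layernorm are stand-ins for illustration, not the implementation in this commit:

```python
import numpy as np

def silu(x):
    return x / (1.0 + np.exp(-x))

def layernorm(x, eps=1e-5):
    # stand-in for linear_proj.norm1 (no learned scale/shift here)
    mu, var = x.mean(-1, keepdims=True), x.var(-1, keepdims=True)
    return (x - mu) / np.sqrt(var + eps)

def connector(img_feats, w_fc, w_gate, w_up, w_down, boi, eoi):
    h = layernorm(img_feats @ w_fc)                   # V_MMPROJ_FC + V_MM_POST_FC_NORM
    h = (silu(h @ w_gate) * (h @ w_up)) @ w_down      # V_MM_GATE / V_MM_UP / V_MM_DOWN
    return np.concatenate([boi[None], h, eoi[None]])  # bracket with V_TOK_BOI / V_TOK_EOI
```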