Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-10-28 08:31:25 +00:00
convert : BailingMoE : avoid setting rope_dim to 0 (#12678)
@@ -5146,7 +5146,7 @@ class BailingMoeModel(Model):
     def set_gguf_parameters(self):
         super().set_gguf_parameters()
         hparams = self.hparams
-        if "head_dim" in hparams:
+        if hparams.get("head_dim"):
             rope_dim = hparams["head_dim"]
         else:
             rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"]
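For context, a minimal sketch of what the one-line change fixes (the hparams values below are illustrative, not taken from a real BailingMoE config): the old membership test `"head_dim" in hparams` is true even when the config declares head_dim as 0 or null, so rope_dim would be set to 0; `hparams.get("head_dim")` treats a falsy value like a missing key and falls back to hidden_size // num_attention_heads.

# Illustrative only: old vs. new check with a hypothetical config
# in which "head_dim" is present but 0.

def rope_dim_old(hparams: dict) -> int:
    # Old check: membership is true even when the value is 0/None,
    # so rope_dim ends up as 0.
    if "head_dim" in hparams:
        return hparams["head_dim"]
    return hparams["hidden_size"] // hparams["num_attention_heads"]

def rope_dim_new(hparams: dict) -> int:
    # New check: a falsy head_dim (0, None, or missing key) falls
    # through to the computed value.
    if hparams.get("head_dim"):
        return hparams["head_dim"]
    return hparams["hidden_size"] // hparams["num_attention_heads"]

hparams = {"head_dim": 0, "hidden_size": 2048, "num_attention_heads": 16}
print(rope_dim_old(hparams))  # 0   (invalid rope dimension)
print(rope_dim_new(hparams))  # 128 (2048 // 16)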