convert-llama-7b-pth-to-gguf.py : rope scale fix
@@ -118,7 +118,7 @@ gguf_writer.add_head_count(head_count)
 gguf_writer.add_head_count_kv(head_count_kv)
 gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
 
-if "rope_scaling" in hparams and "factor" in hparams["rope_scaling"]:
+if "rope_scaling" in hparams and hparams["rope_scaling"] != None and "factor" in hparams["rope_scaling"]:
     if "type" in hparams["rope_scaling"]:
         if hparams["rope_scaling"]["type"] == "linear":
             gguf_writer.add_rope_scale_linear(hparams["rope_scaling"]["factor"])
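Why the extra check is needed (a minimal sketch; the hparams value below is hypothetical, chosen to mimic a HuggingFace config.json that sets "rope_scaling": null, which json.load turns into None):

# Hypothetical hparams as loaded from a config.json with "rope_scaling": null
hparams = {"rope_scaling": None}

# Old condition: '"factor" in None' raises
# TypeError: argument of type 'NoneType' is not iterable
# if "rope_scaling" in hparams and "factor" in hparams["rope_scaling"]: ...

# Fixed condition: the None check short-circuits before indexing,
# so a null rope_scaling entry is simply skipped.
# (PEP 8 would spell this 'is not None'; the commit uses '!= None'.)
if "rope_scaling" in hparams and hparams["rope_scaling"] != None and "factor" in hparams["rope_scaling"]:
    print("rope scale:", hparams["rope_scaling"]["factor"])
else:
    print("no linear rope scaling factor to write")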