mirror of https://github.com/ggml-org/llama.cpp.git
convert-gptneox-hf-to-gguf.py : remove extra kv
@@ -96,7 +96,6 @@ print("gguf: get model metadata")
 block_count = hparams["num_hidden_layers"]

 gguf_writer.add_architecture()
 gguf_writer.add_name(last_dir)
 gguf_writer.add_context_length(hparams["max_position_embeddings"])
 gguf_writer.add_embedding_length(hparams["hidden_size"])
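The hunk above sits in the metadata section of the conversion script, where fields from the Hugging Face config.json (hparams) are written into the GGUF file as key-value pairs through gguf_writer. As a rough, self-contained sketch of that pattern, assuming illustrative file paths, the "gptneox" architecture label, a placeholder model name, and the GGUFWriter constructor signature (none of which are taken from this commit):

# Sketch of the GGUF metadata pattern used by convert-gptneox-hf-to-gguf.py.
# Paths, the arch label, the model name, and the constructor signature are
# illustrative assumptions, not copied from this commit.
import json
import gguf  # gguf-py package shipped in the llama.cpp repository

with open("gptneox-model/config.json") as f:
    hparams = json.load(f)

gguf_writer = gguf.GGUFWriter("gptneox-model.gguf", "gptneox")  # signature assumed

block_count = hparams["num_hidden_layers"]

# Each add_* call stores one key-value pair in the GGUF header.
gguf_writer.add_architecture()
gguf_writer.add_name("gptneox-model")  # the real script derives the name from the model directory
gguf_writer.add_context_length(hparams["max_position_embeddings"])
gguf_writer.add_embedding_length(hparams["hidden_size"])
gguf_writer.add_block_count(block_count)

# Only the header and KV data are written here; tensor data would follow in the full script.
gguf_writer.write_header_to_file()
gguf_writer.write_kv_data_to_file()
gguf_writer.close()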