gguf : single pass for writing tensors + refactoring writer
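The writer now does a single pass over tensor data: GGUFWriter takes the output path and architecture in its constructor (the GGUFWriter.open() classmethod is removed), add_tensor() stages each tensor in a spooled temporary file while add_tensor_info() tracks the running data offsets, and write_tensors_to_file() copies everything out in one go. The per-architecture add_* setters also drop their llm parameter and format their keys with self.arch instead. A minimal sketch of how caller code changes, using only methods that appear in this diff:

import numpy as np

# before: writer opened via classmethod, architecture repeated on every call
#   gguf_writer = GGUFWriter.open("example.gguf")
#   gguf_writer.add_context_length("llama", 2048)
#   gguf_writer.write_tensor_to_file(tensor1)

# after: architecture bound once, tensors buffered and flushed in one pass
gguf_writer = GGUFWriter("example.gguf", "llama")
gguf_writer.add_architecture()
gguf_writer.add_context_length(2048)
gguf_writer.add_tensor("tensor1", np.ones((32,), dtype=np.float32))
gguf_writer.write_header_to_file()
gguf_writer.write_kv_data_to_file()
gguf_writer.write_tensors_to_file()   # tensor info + buffered data, single copy
gguf_writer.close()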
gguf.py
@@ -1,11 +1,7 @@
"""TODOs
1. Implement writers for known architectures, LLaMA in particular.
2. Add docstrings from the format specs.
3. After development is done, convert it to a proper pip-installable Python package, and possibly move it to its own repo under ggml-org.
"""

import shutil
import sys
import struct
import tempfile
import numpy as np

from enum import IntEnum
@@ -15,152 +11,153 @@ from typing import Any, IO, List
#
# constants
#

GGUF_MAGIC = 0x47475546
GGUF_VERSION = 1
GGUF_DEFAULT_ALIGNMENT = 32

# general
KEY_GENERAL_ARCHITECTURE = "general.architecture"
KEY_GENERAL_QUANTIZATION_VERSION = "general.quantization_version"
KEY_GENERAL_ALIGNMENT = "general.alignment"
KEY_GENERAL_NAME = "general.name"
KEY_GENERAL_AUTHOR = "general.author"
KEY_GENERAL_URL = "general.url"
KEY_GENERAL_DESCRIPTION = "general.description"
KEY_GENERAL_FILE_TYPE = "general.file_type"
KEY_GENERAL_LICENSE = "general.license"
KEY_GENERAL_SOURCE_URL = "general.source.url"
KEY_GENERAL_SOURCE_HF_REPO = "general.source.hugginface.repository"

# LLM
KEY_LLM_CONTEXT_LENGTH = "{llm}.context_length"
KEY_LLM_EMBEDDING_LENGTH = "{llm}.embedding_length"
KEY_LLM_BLOCK_COUNT = "{llm}.block_count"
KEY_LLM_FEED_FORWARD_LENGTH = "{llm}.feed_forward_length"
KEY_LLM_USE_PARALLEL_RESIDUAL = "{llm}.use_parallel_residual"
KEY_LLM_TENSOR_DATA_LAYOUT = "{llm}.tensor_data_layout"

# attention
KEY_ATTENTION_HEAD_COUNT = "{llm}.attention.head_count"
KEY_ATTENTION_HEAD_COUNT_KV = "{llm}.attention.head_count_kv"
KEY_ATTENTION_MAX_ALIBI_BIAS = "{llm}.attention.max_alibi_bias"
KEY_ATTENTION_CLAMP_KQV = "{llm}.attention.clamp_kqv"
KEY_ATTENTION_LAYERNORM_EPS = "{llm}.attention.layer_norm_epsilon"
KEY_ATTENTION_LAYERNORM_RMS_EPS = "{llm}.attention.layer_norm_rms_epsilon"

# RoPE
KEY_ROPE_DIMENSION_COUNT = "{llm}.rope.dimension_count"
KEY_ROPE_SCALE = "{llm}.rope.scale"

# tokenization
KEY_TOKENIZER_MODEL = "tokenizer.ggml.model"
KEY_TOKENIZER_LIST = "tokenizer.ggml.tokens"
KEY_TOKENIZER_TOKEN_TYPE = "tokenizer.ggml.token_type"
KEY_TOKENIZER_SCORES = "tokenizer.ggml.scores"
KEY_TOKENIZER_MERGES = "tokenizer.ggml.merges"
KEY_TOKENIZER_BOS_ID = "tokenizer.ggml.bos_token_id"
KEY_TOKENIZER_EOS_ID = "tokenizer.ggml.eos_token_id"
KEY_TOKENIZER_UNK_ID = "tokenizer.ggml.unknown_token_id"
KEY_TOKENIZER_SEP_ID = "tokenizer.ggml.seperator_token_id"
KEY_TOKENIZER_PAD_ID = "tokenizer.ggml.padding_token_id"
KEY_TOKENIZER_HF_JSON = "tokenizer.huggingface.json"
KEY_TOKENIZER_RWKV = "tokenizer.rwkv.world"
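# The "{llm}" placeholders are filled in with the architecture name when the
# writer emits a key, e.g. (illustration only):
#   KEY_LLM_CONTEXT_LENGTH.format(llm="llama")   # -> "llama.context_length"
#   KEY_ATTENTION_HEAD_COUNT.format(llm="mpt")   # -> "mpt.attention.head_count"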

#
# recommended mapping of model tensor names for storage in gguf
#

def get_tensor_name_map(n_blocks: int):
    tensor_map = {}

    # Token embeddings
    mapped_to = "token_embd"
    tensor_map["gpt_neox.embed_in"] = mapped_to # gptneox
    tensor_map["transformer.wte"] = mapped_to # gpt2 mpt
    tensor_map["transformer.word_embeddings"] = mapped_to # falcon
    tensor_map["model.embed_tokens"] = mapped_to # llama-hf
    tensor_map["tok_embeddings"] = mapped_to # llama-pth

    # Position embeddings
    mapped_to = "pos_embd"
    tensor_map["transformer.wpe"] = mapped_to # gpt2

    # Output norm
    mapped_to = "output_norm"
    tensor_map["gpt_neox.final_layer_norm"] = mapped_to # gptneox
    tensor_map["transformer.ln_f"] = mapped_to # gpt2 falcon
    tensor_map["transformer.norm_f"] = mapped_to # mpt
    tensor_map["model.norm"] = mapped_to # llama-hf
    tensor_map["norm"] = mapped_to # llama-pth

    # Output
    mapped_to = "output"
    tensor_map["embed_out"] = mapped_to # gptneox
    tensor_map["lm_head"] = mapped_to # gpt2 mpt falcon llama-hf
    tensor_map["output"] = mapped_to # llama-pth

    # Attention and feed-forward layer blocks
    for i in range(0, n_blocks):
        # Attention norm
        mapped_to = "blk."+str(i)+".attn_norm"
        tensor_map["gpt_neox.layers."+str(i)+".input_layernorm"] = mapped_to # gptneox
        tensor_map["transformer.h."+str(i)+".ln_1"] = mapped_to # gpt2
        tensor_map["transformer.blocks."+str(i)+".norm_1"] = mapped_to # mpt
        tensor_map["transformer.h."+str(i)+".input_layernorm"] = mapped_to # falcon7b
        tensor_map["transformer.h."+str(i)+".ln_attn"] = mapped_to # falcon40b
        tensor_map["model.layers."+str(i)+".input_layernorm"] = mapped_to # llama-hf
        tensor_map["layers."+str(i)+".attention_norm"] = mapped_to # llama-pth

        # Attention norm 2
        mapped_to = "blk."+str(i)+".attn_norm_2"
        tensor_map["transformer.h."+str(i)+".ln_mlp"] = mapped_to # falcon40b

        # Attention query-key-value
        mapped_to = "blk."+str(i)+".attn_qkv"
        tensor_map["gpt_neox.layers."+str(i)+".attention.query_key_value"] = mapped_to # gptneox
        tensor_map["transformer.h."+str(i)+".attn.c_attn"] = mapped_to # gpt2
        tensor_map["transformer.blocks."+str(i)+".attn.Wqkv"] = mapped_to # mpt
        tensor_map["transformer.h."+str(i)+".self_attention.query_key_value"] = mapped_to # falcon

        # Attention query
        mapped_to = "blk."+str(i)+".attn_q"
        tensor_map["model.layers."+str(i)+".self_attn.q_proj"] = mapped_to # llama-hf
        tensor_map["layers."+str(i)+".attention.wq"] = mapped_to # llama-pth

        # Attention key
        mapped_to = "blk."+str(i)+".attn_k"
        tensor_map["model.layers."+str(i)+".self_attn.k_proj"] = mapped_to # llama-hf
        tensor_map["layers."+str(i)+".attention.wk"] = mapped_to # llama-pth

        # Attention value
        mapped_to = "blk."+str(i)+".attn_v"
        tensor_map["model.layers."+str(i)+".self_attn.v_proj"] = mapped_to # llama-hf
        tensor_map["layers."+str(i)+".attention.wv"] = mapped_to # llama-pth

        # Attention output
        mapped_to = "blk."+str(i)+".attn_output"
        tensor_map["gpt_neox.layers."+str(i)+".attention.dense"] = mapped_to # gptneox
        tensor_map["transformer.h."+str(i)+".attn.c_proj"] = mapped_to # gpt2
        tensor_map["transformer.blocks."+str(i)+".attn.out_proj"] = mapped_to # mpt
        tensor_map["transformer.h."+str(i)+".self_attention.dense"] = mapped_to # falcon
        tensor_map["model.layers."+str(i)+".self_attn.o_proj"] = mapped_to # llama-hf
        tensor_map["layers."+str(i)+".attention.wo"] = mapped_to # llama-pth

        # Feed-forward norm
        mapped_to = "blk."+str(i)+".ffn_norm"
        tensor_map["gpt_neox.layers."+str(i)+".post_attention_layernorm"] = mapped_to # gptneox
        tensor_map["transformer.h."+str(i)+".ln_2"] = mapped_to # gpt2
        tensor_map["transformer.blocks."+str(i)+".norm_2"] = mapped_to # mpt
        tensor_map["model.layers."+str(i)+".post_attention_layernorm"] = mapped_to # llama-hf
        tensor_map["layers."+str(i)+".ffn_norm"] = mapped_to # llama-pth

        # Feed-forward up
        mapped_to = "blk."+str(i)+".ffn_up"
        tensor_map["gpt_neox.layers."+str(i)+".mlp.dense_h_to_4h"] = mapped_to # gptneox
        tensor_map["transformer.h."+str(i)+".mlp.c_fc"] = mapped_to # gpt2
        tensor_map["transformer.blocks."+str(i)+".ffn.up_proj"] = mapped_to # mpt
        tensor_map["transformer.h."+str(i)+".mlp.dense_h_to_4h"] = mapped_to # falcon
        tensor_map["model.layers."+str(i)+".mlp.up_proj"] = mapped_to # llama-hf
        tensor_map["layers."+str(i)+".feed_forward.w3"] = mapped_to # llama-pth

        # Feed-forward gate
        mapped_to = "blk."+str(i)+".ffn_gate"
        tensor_map["model.layers."+str(i)+".mlp.gate_proj"] = mapped_to # llama-hf
        tensor_map["layers."+str(i)+".feed_forward.w1"] = mapped_to # llama-pth

        # Feed-forward down
        mapped_to = "blk."+str(i)+".ffn_down"
        tensor_map["gpt_neox.layers."+str(i)+".mlp.dense_4h_to_h"] = mapped_to # gptneox
        tensor_map["transformer.h."+str(i)+".mlp.c_proj"] = mapped_to # gpt2
        tensor_map["transformer.blocks."+str(i)+".ffn.down_proj"] = mapped_to # mpt
        tensor_map["transformer.h."+str(i)+".mlp.dense_4h_to_h"] = mapped_to # falcon
        tensor_map["model.layers."+str(i)+".mlp.down_proj"] = mapped_to # llama-hf
        tensor_map["layers."+str(i)+".feed_forward.w2"] = mapped_to # llama-pth

    return tensor_map
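# Usage sketch for the map above: a conversion script looks up the source
# checkpoint's tensor name to get the canonical gguf name. The example names
# are taken from the llama-hf and llama-pth mappings in this function.
#   name_map = get_tensor_name_map(n_blocks=32)
#   name_map["model.layers.0.self_attn.q_proj"]   # -> "blk.0.attn_q"
#   name_map["tok_embeddings"]                    # -> "token_embd"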
@@ -168,22 +165,23 @@ def get_tensor_name_map(n_blocks : int):
#
# implementation
#


class GGMLQuantizationType(IntEnum):
    F32 = 0
    F16 = 1


class GGUFValueType(IntEnum):
    UINT8 = 0
    INT8 = 1
    UINT16 = 2
    INT16 = 3
    UINT32 = 4
    INT32 = 5
    FLOAT32 = 6
    BOOL = 7
    STRING = 8
    ARRAY = 9

    @staticmethod
    def get_type(val):
@@ -203,8 +201,9 @@ class GGUFValueType(IntEnum):

class GGUFWriter:
    def __init__(self, path: str, architecture: str):
        self.fout = open(path, "wb")
        self.arch = architecture
        self.offset_tensor = 0
        self.data_alignment = GGUF_DEFAULT_ALIGNMENT
        self.kv_data = b""
@@ -228,11 +227,6 @@ class GGUFWriter:
        self.fout.write(self.ti_data)
        self.flush()

    def add_key(self, key: str):
        self.add_val(key, GGUFValueType.STRING, add_vtype=False)
@@ -269,7 +263,8 @@ class GGUFWriter:
        self.add_val(val, GGUFValueType.BOOL)

    def add_string(self, key: str, val: str):
        if len(val) == 0:
            return
        self.add_key(key)
        self.add_val(val, GGUFValueType.STRING)
@@ -323,6 +318,8 @@ class GGUFWriter:
        return ((x + n - 1) // n) * n

    def add_tensor_info(self, name: str, tensor_shape: np.ndarray, tensor_dtype: np.dtype, tensor_nbytes: int):
        encoded_name = name.encode("utf8")
        self.ti_data += struct.pack("<I", len(encoded_name))
        self.ti_data += encoded_name
@@ -331,13 +328,25 @@
        for i in range(n_dims):
            self.ti_data += struct.pack("<I", tensor_shape[n_dims - 1 - i])

        assert tensor_dtype in (np.float32, np.float16), "Only F32 and F16 tensors are supported for now"
        dtype = GGMLQuantizationType.F32 if tensor_dtype == np.float32 else GGMLQuantizationType.F16
        self.ti_data += struct.pack("<I", dtype)
        self.ti_data += struct.pack("<Q", self.offset_tensor)
        self.offset_tensor += GGUFWriter.ggml_pad(tensor_nbytes, self.data_alignment)
        self.ti_data_count += 1
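# Worked example of the bookkeeping above (the n_dims field itself is packed
# in lines elided by this hunk): for a (32,)-float32 tensor named "tensor1",
# ti_data gains <I 7, b"tensor1", the reversed shape as <I 32, <I 0 for
# GGMLQuantizationType.F32 and a <Q for the current offset. The tensor is
# 32 * 4 = 128 bytes, and ggml_pad(128, 32) == 128 with the default alignment,
# so self.offset_tensor advances by exactly 128 for the next tensor.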
    def add_tensor(self, name: str, tensor: np.ndarray):
        if not hasattr(self, "temp_file"):
            self.temp_file = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256*1024*1024)
            self.temp_file.seek(0)

        self.add_tensor_info(name, tensor.shape, tensor.dtype, tensor.nbytes)

        tensor.tofile(self.temp_file)

        pad = GGUFWriter.ggml_pad(tensor.nbytes, self.data_alignment) - tensor.nbytes
        if pad != 0:
            self.temp_file.write(bytes([0] * pad))

    def write_tensor_to_file(self, tensor: np.ndarray):
        pad = GGUFWriter.ggml_pad(self.fout.tell(), self.data_alignment) - self.fout.tell()
        if pad != 0:
@@ -349,21 +358,33 @@ class GGUFWriter:
        if pad != 0:
            self.fout.write(bytes([0] * pad))

    def write_tensors_to_file(self):
        self.write_ti_data_to_file()

        pad = GGUFWriter.ggml_pad(self.fout.tell(), self.data_alignment) - self.fout.tell()
        if pad != 0:
            self.fout.write(bytes([0] * pad))

        self.temp_file.seek(0)

        shutil.copyfileobj(self.temp_file, self.fout)
        self.flush()
        self.temp_file.close()

    def flush(self):
        self.fout.flush()

    def close(self):
        self.fout.close()
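# Note on the single-pass scheme above: add_tensor() stages each padded tensor
# in the SpooledTemporaryFile, which lives in RAM up to the 256 MiB max_size
# and transparently spills to disk past that, while add_tensor_info() records
# the final offsets as it goes. By the time write_tensors_to_file() runs, the
# tensor info block is already complete, so metadata and data are emitted in a
# single shutil.copyfileobj() pass instead of rewriting the file.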
    def add_architecture(self):
        self.add_string(KEY_GENERAL_ARCHITECTURE, self.arch)

    def add_author(self, author: str):
        self.add_string(KEY_GENERAL_AUTHOR, author)

    def add_tensor_data_layout(self, layout: str):
        self.add_string(KEY_LLM_TENSOR_DATA_LAYOUT.format(llm=self.arch), layout)

    def add_url(self, url: str):
        self.add_string(KEY_GENERAL_URL, url)
@@ -391,60 +412,60 @@ class GGUFWriter:
        self.data_alignment = alignment
        self.add_uint32(KEY_GENERAL_ALIGNMENT, alignment)

    def add_context_length(self, length: int):
        self.add_uint32(
            KEY_LLM_CONTEXT_LENGTH.format(llm=self.arch), length)

    def add_embedding_length(self, length: int):
        self.add_uint32(
            KEY_LLM_EMBEDDING_LENGTH.format(llm=self.arch), length)

    def add_block_count(self, length: int):
        self.add_uint32(
            KEY_LLM_BLOCK_COUNT.format(llm=self.arch), length)

    def add_feed_forward_length(self, length: int):
        self.add_uint32(
            KEY_LLM_FEED_FORWARD_LENGTH.format(llm=self.arch), length)

    def add_parallel_residual(self, use: bool):
        self.add_bool(
            KEY_LLM_USE_PARALLEL_RESIDUAL.format(llm=self.arch), use)

    def add_tensor_data_layout(self, layout: str):
        self.add_string(
            KEY_LLM_TENSOR_DATA_LAYOUT.format(llm=self.arch), layout)

    def add_head_count(self, count: int):
        self.add_uint32(
            KEY_ATTENTION_HEAD_COUNT.format(llm=self.arch), count)

    def add_head_count_kv(self, count: int):
        self.add_uint32(
            KEY_ATTENTION_HEAD_COUNT_KV.format(llm=self.arch), count)

    def add_max_alibi_bias(self, bias: float):
        self.add_float32(
            KEY_ATTENTION_MAX_ALIBI_BIAS.format(llm=self.arch), bias)

    def add_clamp_kqv(self, value: float):
        self.add_float32(
            KEY_ATTENTION_CLAMP_KQV.format(llm=self.arch), value)

    def add_layer_norm_eps(self, value: float):
        self.add_float32(
            KEY_ATTENTION_LAYERNORM_EPS.format(llm=self.arch), value)

    def add_layer_norm_rms_eps(self, value: float):
        self.add_float32(
            KEY_ATTENTION_LAYERNORM_RMS_EPS.format(llm=self.arch), value)

    def add_rope_dimension_count(self, count: int):
        self.add_uint32(
            KEY_ROPE_DIMENSION_COUNT.format(llm=self.arch), count)

    def add_rope_scale(self, value: float):
        self.add_float32(KEY_ROPE_SCALE.format(llm=self.arch), value)

    def add_tokenizer_model(self, model: str):
        self.add_string(KEY_TOKENIZER_MODEL, model)
@@ -476,24 +497,27 @@ class GGUFWriter:
    def add_pad_token_id(self, id: int):
        self.add_uint32(KEY_TOKENIZER_PAD_ID, id)


# Example usage:
if __name__ == "__main__":
    # Example usage with a file
    gguf_writer = GGUFWriter("example.gguf", "llama")

    gguf_writer.add_architecture()
    gguf_writer.add_block_count(12)
    gguf_writer.add_uint32("answer", 42)              # Write a 32-bit integer
    gguf_writer.add_float32("answer_in_float", 42.0)  # Write a 32-bit float
    gguf_writer.add_custom_alignment(64)

    tensor1 = np.ones((32,), dtype=np.float32) * 100.0
    tensor2 = np.ones((64,), dtype=np.float32) * 101.0
    tensor3 = np.ones((96,), dtype=np.float32) * 102.0

    gguf_writer.add_tensor("tensor1", tensor1)
    gguf_writer.add_tensor("tensor2", tensor2)
    gguf_writer.add_tensor("tensor3", tensor3)

    gguf_writer.write_header_to_file()
    gguf_writer.write_kv_data_to_file()
    gguf_writer.write_tensors_to_file()

    gguf_writer.close()
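With the custom 64-byte alignment set in the example, the data offsets the writer records for the three tensors are easy to predict by hand; a small sketch using only the ggml_pad arithmetic shown above:

def ggml_pad(x: int, n: int) -> int:
    return ((x + n - 1) // n) * n

alignment = 64
nbytes = [32 * 4, 64 * 4, 96 * 4]  # tensor1..tensor3 as float32

offset = 0
for name, size in zip(["tensor1", "tensor2", "tensor3"], nbytes):
    print(name, "-> data offset", offset)  # 0, 128, 384
    offset += ggml_pad(size, alignment)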