	gguf.py : accumulate kv and tensor info data + special tokens
1 changed file: gguf.py (284 lines changed)
@@ -61,98 +61,113 @@ class GGUFWriter:
     def __init__(self, fout: IO):
         self.fout = fout
         self.offset_tensor = 0
+        self.kv_data = b""
+        self.kv_data_count = 0
+        self.ti_data = b""
+        self.ti_data_count = 0

-    def write_header(self, tensor_count: int, metadata_kv_count: int):
+    def write_header_to_file(self):
         self.fout.write(struct.pack("<I", constants.GGUF_MAGIC))
         self.fout.write(struct.pack("<I", constants.GGUF_VERSION))
-        self.fout.write(struct.pack("<I", tensor_count))
-        self.fout.write(struct.pack("<I", metadata_kv_count))
+        self.fout.write(struct.pack("<I", self.ti_data_count))
+        self.fout.write(struct.pack("<I", self.kv_data_count))
+        self.flush()
+#        print("tensors " + str(self.ti_data_count) + " kv " + str(self.kv_data_count))
+
+    def write_kv_data_to_file(self):
+        self.fout.write(self.kv_data)
+        self.flush()
+
+    def write_ti_data_to_file(self):
+        self.fout.write(self.ti_data)
+        self.flush()

     @classmethod
     def open(cls, path: str) -> "GGUFWriter":
         f = open(path, "wb")
         return cls(f)

-    def write_key(self, key: str):
-        self.write_val(key, GGUFValueType.STRING, write_vtype=False)
+    def add_key(self, key: str):
+        self.add_val(key, GGUFValueType.STRING, add_vtype=False)

-    def write_uint8(self, key: str, val: int):
-        self.write_key(key)
-        self.write_val(val, GGUFValueType.UINT8)
+    def add_uint8(self, key: str, val: int):
+        self.add_key(key)
+        self.add_val(val, GGUFValueType.UINT8)

-    def write_int8(self, key: str, val: int):
-        self.write_key(key)
-        self.write_val(val, GGUFValueType.INT8)
+    def add_int8(self, key: str, val: int):
+        self.add_key(key)
+        self.add_val(val, GGUFValueType.INT8)

-    def write_uint16(self, key: str, val: int):
-        self.write_key(key)
-        self.write_val(val, GGUFValueType.UINT16)
+    def add_uint16(self, key: str, val: int):
+        self.add_key(key)
+        self.add_val(val, GGUFValueType.UINT16)

-    def write_int16(self, key: str, val: int):
-        self.write_key(key)
-        self.write_val(val, GGUFValueType.INT16)
+    def add_int16(self, key: str, val: int):
+        self.add_key(key)
+        self.add_val(val, GGUFValueType.INT16)

-    def write_uint32(self, key: str, val: int):
-        self.write_key(key)
-        self.write_val(val, GGUFValueType.UINT32)
+    def add_uint32(self, key: str, val: int):
+        self.add_key(key)
+        self.add_val(val, GGUFValueType.UINT32)

-    def write_int32(self, key: str, val: int):
-        self.write_key(key)
-        self.write_val(val, GGUFValueType.INT32)
+    def add_int32(self, key: str, val: int):
+        self.add_key(key)
+        self.add_val(val, GGUFValueType.INT32)

-    def write_float32(self, key: str, val: float):
-        self.write_key(key)
-        self.write_val(val, GGUFValueType.FLOAT32)
+    def add_float32(self, key: str, val: float):
+        self.add_key(key)
+        self.add_val(val, GGUFValueType.FLOAT32)

-    def write_bool(self, key: str, val: bool):
-        self.write_key(key)
-        self.write_val(val, GGUFValueType.BOOL)
+    def add_bool(self, key: str, val: bool):
+        self.add_key(key)
+        self.add_val(val, GGUFValueType.BOOL)

-    def write_string(self, key: str, val: str):
-        self.write_key(key)
-        self.write_val(val, GGUFValueType.STRING)
+    def add_string(self, key: str, val: str):
+        self.add_key(key)
+        self.add_val(val, GGUFValueType.STRING)

-    def write_array(self, key: str, val: list):
+    def add_array(self, key: str, val: list):
         if not isinstance(val, list):
             raise ValueError("Value must be a list for array type")

-        self.write_key(key)
-        self.write_val(val, GGUFValueType.ARRAY)
+        self.add_key(key)
+        self.add_val(val, GGUFValueType.ARRAY)

-    def write_val(self: str, val: Any, vtype: GGUFValueType = None, write_vtype: bool = True):
+    def add_val(self: str, val: Any, vtype: GGUFValueType = None, add_vtype: bool = True):
         if vtype is None:
             vtype = GGUFValueType.get_type(val)

-        if write_vtype:
-            self.fout.write(struct.pack("<I", vtype))
+        if add_vtype:
+            self.kv_data += struct.pack("<I", vtype)
+            self.kv_data_count += 1;

         if vtype == GGUFValueType.UINT8:
-            self.fout.write(struct.pack("<B", val))
+            self.kv_data += struct.pack("<B", val)
         elif vtype == GGUFValueType.INT8:
-            self.fout.write(struct.pack("<b", val))
+            self.kv_data += struct.pack("<b", val)
         elif vtype == GGUFValueType.UINT16:
-            self.fout.write(struct.pack("<H", val))
+            self.kv_data += struct.pack("<H", val)
         elif vtype == GGUFValueType.INT16:
-            self.fout.write(struct.pack("<h", val))
+            self.kv_data += struct.pack("<h", val)
         elif vtype == GGUFValueType.UINT32:
-            self.fout.write(struct.pack("<I", val))
+            self.kv_data += struct.pack("<I", val)
         elif vtype == GGUFValueType.INT32:
-            self.fout.write(struct.pack("<i", val))
+            self.kv_data += struct.pack("<i", val)
         elif vtype == GGUFValueType.FLOAT32:
-            self.fout.write(struct.pack("<f", val))
+            self.kv_data += struct.pack("<f", val)
         elif vtype == GGUFValueType.BOOL:
-            self.fout.write(struct.pack("?", val))
+            self.kv_data += struct.pack("?", val)
         elif vtype == GGUFValueType.STRING:
             encoded_val = val.encode("utf8") if isinstance(val, str) else val
-            self.fout.write(struct.pack("<I", len(encoded_val)))
-            self.fout.write(encoded_val)
+            self.kv_data += struct.pack("<I", len(encoded_val))
+            self.kv_data += encoded_val
         elif vtype == GGUFValueType.ARRAY:
             ltype = set([GGUFValueType.get_type(item) for item in val])
             assert len(ltype) == 1, "All items in a GGUF array should be of the same type"
-            self.fout.write(struct.pack("<I", list(ltype)[0]))
-            self.fout.write(struct.pack("<I", len(val)))
+            self.kv_data += struct.pack("<I", list(ltype)[0])
+            self.kv_data += struct.pack("<I", len(val))
             for item in val:
-                self.write_val(item, write_vtype=False)
+                self.add_val(item, add_vtype=False)
         else:
             raise ValueError("Invalid GGUF metadata value type")
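The effect of this hunk is to defer all metadata output: the renamed add_* methods only append packed bytes to self.kv_data and bump self.kv_data_count, so the header can be written with the final counts before any payload, and write_header(tensor_count, metadata_kv_count) no longer needs the counts passed in up front. A minimal standalone sketch of that accumulate-then-write pattern (simplified key/value encoding and a placeholder magic value, not the exact GGUF layout):

    import struct

    class BufferedWriter:
        # Sketch only: values are packed into an in-memory buffer and counted,
        # so the header can carry the final count yet still be written first.
        def __init__(self, fout):
            self.fout = fout
            self.kv_data = b""
            self.kv_data_count = 0

        def add_string(self, key: str, val: str):
            for s in (key, val):
                enc = s.encode("utf8")
                self.kv_data += struct.pack("<I", len(enc)) + enc  # length-prefixed
            self.kv_data_count += 1

        def write_to_file(self):
            self.fout.write(struct.pack("<I", 0x47475546))  # placeholder magic
            self.fout.write(struct.pack("<I", self.kv_data_count))
            self.fout.write(self.kv_data)

    with open("sketch.bin", "wb") as f:
        w = BufferedWriter(f)
        w.add_string("general.name", "example")  # buffers only; nothing on disk yet
        w.write_to_file()                        # header + buffered kv in one pass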
@@ -160,22 +175,23 @@ class GGUFWriter:
     def ggml_pad(x: int, n: int) -> int:
         return ((x + n - 1) // n) * n

-    def write_tensor_info(self, name: str, tensor: np.ndarray):
-        self.write_key(name)
+    def add_tensor_info(self, name: str, tensor: np.ndarray):
+        encoded_name = name.encode("utf8")
+        self.ti_data += struct.pack("<I", len(encoded_name))
+        self.ti_data += encoded_name
         n_dims = len(tensor.shape)
-        self.fout.write(struct.pack("<i", n_dims))
+        self.ti_data += struct.pack("<I", n_dims)
         for i in range(n_dims):
-            self.fout.write(struct.pack("<i", tensor.shape[n_dims - 1 - i]))
+            self.ti_data += struct.pack("<I", tensor.shape[n_dims - 1 - i])

         assert tensor.dtype in (np.float32, np.float16), "Only F32 and F16 tensors are supported for now"
         dtype = GGMLQuantizationType.F32 if tensor.dtype == np.float32 else GGMLQuantizationType.F16
-        self.fout.write(struct.pack("<i", dtype))
-        self.fout.write(struct.pack("<Q", self.offset_tensor))
+        self.ti_data += struct.pack("<I", dtype)
+        self.ti_data += struct.pack("<Q", self.offset_tensor)
         self.offset_tensor += GGUFWriter.ggml_pad(tensor.nbytes, constants.GGUF_DEFAULT_ALIGNMENT)
+        self.ti_data_count += 1

-        self.flush()
-
-    def write_tensor(self, tensor: np.ndarray):
+    def write_tensor_to_file(self, tensor: np.ndarray):
         pad = GGUFWriter.ggml_pad(self.fout.tell(), constants.GGUF_DEFAULT_ALIGNMENT) - self.fout.tell()
         if pad != 0:
             self.fout.write(bytes([0] * pad))
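ggml_pad is plain round-up-to-a-multiple arithmetic. add_tensor_info advances offset_tensor by the padded tensor size while write_tensor_to_file pads the actual file position with zero bytes the same way, so the offsets recorded in the tensor info stay in sync with where the data really lands. A small worked check (assuming the default alignment behind constants.GGUF_DEFAULT_ALIGNMENT is 32 bytes, as in the GGUF spec):

    def ggml_pad(x: int, n: int) -> int:
        # Round x up to the next multiple of n via integer ceiling division.
        return ((x + n - 1) // n) * n

    ALIGN = 32  # assumed value of constants.GGUF_DEFAULT_ALIGNMENT

    # A 32-float tensor is 128 bytes: already aligned, no padding needed.
    assert ggml_pad(128, ALIGN) == 128
    # A 100-byte blob is padded out to 128 so the next offset is aligned.
    assert ggml_pad(100, ALIGN) == 128
    # Padding actually written = padded position - current position.
    pos = 100
    assert ggml_pad(pos, ALIGN) - pos == 28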
@@ -191,121 +207,139 @@ class GGUFWriter:
     def close(self):
         self.fout.close()

-    def write_architecture(self, architecture: str):
-        self.write_string(constants.KEY_GENERAL_ARCHITECTURE,
+    def add_architecture(self, architecture: str):
+        self.add_string(constants.KEY_GENERAL_ARCHITECTURE,
                           architecture)

-    def write_author(self, author: str):
-        self.write_string(constants.KEY_GENERAL_AUTHOR, author)
+    def add_author(self, author: str):
+        self.add_string(constants.KEY_GENERAL_AUTHOR, author)

-    def write_url(self, url: str):
-        self.write_string(constants.KEY_GENERAL_URL, url)
+    def add_url(self, url: str):
+        self.add_string(constants.KEY_GENERAL_URL, url)

-    def write_description(self, description: str):
-        self.write_string(constants.KEY_GENERAL_DESCRIPTION, description)
+    def add_description(self, description: str):
+        self.add_string(constants.KEY_GENERAL_DESCRIPTION, description)

-    def write_file_type(self, file_type: str):
-        self.write_string(constants.KEY_GENERAL_FILE_TYPE, file_type)
+    def add_file_type(self, file_type: str):
+        self.add_string(constants.KEY_GENERAL_FILE_TYPE, file_type)

-    def write_source_url(self, url: str):
-        self.write_string(constants.KEY_GENERAL_SOURCE_URL, url)
+    def add_source_url(self, url: str):
+        self.add_string(constants.KEY_GENERAL_SOURCE_URL, url)

-    def write_source_hf_repo(self, repo: str):
-        self.write_string(constants.KEY_GENERAL_SOURCE_HF_REPO, repo)
+    def add_source_hf_repo(self, repo: str):
+        self.add_string(constants.KEY_GENERAL_SOURCE_HF_REPO, repo)

-    def write_name(self, name: str):
-        self.write_string(constants.KEY_GENERAL_NAME, name)
+    def add_name(self, name: str):
+        self.add_string(constants.KEY_GENERAL_NAME, name)

-    def write_quantization_version(self, quantization_version: GGMLQuantizationType):
-        self.write_uint32(
+    def add_quantization_version(self, quantization_version: GGMLQuantizationType):
+        self.add_uint32(
             constants.KEY_GENERAL_QUANTIZATION_VERSION, quantization_version)

-    def write_custom_alignment(self, alignment: int):
-        self.write_uint32(constants.KEY_GENERAL_ALIGNMENT, alignment)
+    def add_custom_alignment(self, alignment: int):
+        self.add_uint32(constants.KEY_GENERAL_ALIGNMENT, alignment)

-    def write_context_length(self, llm: str, length: int):
-        self.write_uint32(
+    def add_context_length(self, llm: str, length: int):
+        self.add_uint32(
             constants.KEY_LLM_CONTEXT_LENGTH.format(llm=llm), length)

-    def write_embedding_length(self, llm: str, length: int):
-        self.write_uint32(
+    def add_embedding_length(self, llm: str, length: int):
+        self.add_uint32(
             constants.KEY_LLM_EMBEDDING_LENGTH.format(llm=llm), length)

-    def write_layer_count(self, llm: str, length: int):
-        self.write_uint32(
+    def add_layer_count(self, llm: str, length: int):
+        self.add_uint32(
             constants.KEY_LLM_LAYER_COUNT.format(llm=llm), length)

-    def write_feed_forward_length(self, llm: str, length: int):
-        self.write_uint32(
+    def add_feed_forward_length(self, llm: str, length: int):
+        self.add_uint32(
             constants.KEY_LLM_FEED_FORWARD_LENGTH.format(llm=llm), length)

-    def write_parallel_residual(self, llm: str, use: bool):
-        self.write_bool(
+    def add_parallel_residual(self, llm: str, use: bool):
+        self.add_bool(
             constants.KEY_LLM_USE_PARALLEL_RESIDUAL.format(llm=llm), use)

-    def write_tensor_data_layout(self, llm: str, layout: str):
-        self.write_string(
+    def add_tensor_data_layout(self, llm: str, layout: str):
+        self.add_string(
             constants.KEY_LLM_TENSOR_DATA_LAYOUT.format(llm=llm), layout)

-    def write_head_count(self, llm: str, count: int):
-        self.write_uint32(
+    def add_head_count(self, llm: str, count: int):
+        self.add_uint32(
             constants.KEY_ATTENTION_HEAD_COUNT.format(llm=llm), count)

-    def write_head_count_kv(self, llm: str, count: int):
-        self.write_uint32(
+    def add_head_count_kv(self, llm: str, count: int):
+        self.add_uint32(
             constants.KEY_ATTENTION_HEAD_COUNT_KV.format(llm=llm), count)

-    def write_max_alibi_bias(self, llm: str, bias: float):
-        self.write_float32(
+    def add_max_alibi_bias(self, llm: str, bias: float):
+        self.add_float32(
             constants.KEY_ATTENTION_MAX_ALIBI_BIAS.format(llm=llm), bias)

-    def write_clamp_kqv(self, llm: str, value: float):
-        self.write_float32(
+    def add_clamp_kqv(self, llm: str, value: float):
+        self.add_float32(
             constants.KEY_ATTENTION_CLAMP_KQV.format(llm=llm), value)

-    def write_layer_norm_eps(self, llm: str, value: float):
-        self.write_float32(
+    def add_layer_norm_eps(self, llm: str, value: float):
+        self.add_float32(
             constants.KEY_ATTENTION_LAYERNORM_EPS.format(llm=llm), value)

-    def write_layer_norm_rms_eps(self, llm: str, value: float):
-        self.write_float32(
+    def add_layer_norm_rms_eps(self, llm: str, value: float):
+        self.add_float32(
             constants.KEY_ATTENTION_LAYERNORM_RMS_EPS.format(llm=llm), value)

-    def write_rope_dimension_count(self, llm: str, count: int):
-        self.write_uint32(
+    def add_rope_dimension_count(self, llm: str, count: int):
+        self.add_uint32(
             constants.KEY_ROPE_DIMENSION_COUNT.format(llm=llm), count)

-    def write_rope_scale(self, llm: str, value:  float):
-        self.write_float32(constants.KEY_ROPE_SCALE.format(llm=llm), value)
+    def add_rope_scale(self, llm: str, value:  float):
+        self.add_float32(constants.KEY_ROPE_SCALE.format(llm=llm), value)

-    def write_tokenizer_model(self, model: str):
-        self.write_string(constants.KEY_TOKENIZER_MODEL, model)
+    def add_tokenizer_model(self, model: str):
+        self.add_string(constants.KEY_TOKENIZER_MODEL, model)

-    def write_token_list(self, tokens: List):
-        self.write_array(constants.KEY_TOKENIZER_LIST, tokens)
+    def add_token_list(self, tokens: List):
+        self.add_array(constants.KEY_TOKENIZER_LIST, tokens)

-    def write_token_merges(self, merges: List):
-        self.write_array(constants.KEY_TOKENIZER_MERGES, merges)
+    def add_token_merges(self, merges: List):
+        self.add_array(constants.KEY_TOKENIZER_MERGES, merges)

-    def write_token_scores(self, scores: List[float]):
-        self.write_array(constants.KEY_TOKENIZER_SCORES, scores)
+    def add_token_scores(self, scores: List[float]):
+        self.add_array(constants.KEY_TOKENIZER_SCORES, scores)
+
+    def add_bos_token_id(self, id: int):
+        self.add_uint32(constants.KEY_TOKENIZER_BOS_ID, id)
+
+    def add_eos_token_id(self, id: int):
+        self.add_uint32(constants.KEY_TOKENIZER_EOS_ID, id)
+
+    def add_unk_token_id(self, id: int):
+        self.add_uint32(constants.KEY_TOKENIZER_UNK_ID, id)
+
+    def add_sep_token_id(self, id: int):
+        self.add_uint32(constants.KEY_TOKENIZER_SEP_ID, id)
+
+    def add_pad_token_id(self, id: int):
+        self.add_uint32(constants.KEY_TOKENIZER_PAD_ID, id)


 # Example usage:
 if __name__ == "__main__":
     # Example usage with a file
     gguf_writer = GGUFWriter.open("example.gguf")
-    gguf_writer.write_header(2, 4)

-    gguf_writer.write_architecture("llama")
-    gguf_writer.write_uint32("answer", 42)  # Write a 32-bit integer
-    gguf_writer.write_float32("answer_in_float", 42.0)  # Write a 32-bit float
-    gguf_writer.write_custom_alignment(64)
+    gguf_writer.add_architecture("llama")
+    gguf_writer.add_uint32("answer", 42)  # Write a 32-bit integer
+    gguf_writer.add_float32("answer_in_float", 42.0)  # Write a 32-bit float
+    gguf_writer.add_custom_alignment(64)
     tensor1 = np.ones((32,), dtype=np.float32) * 100.0
     tensor2 = np.ones((32,), dtype=np.float32) * 101.0
-    gguf_writer.write_tensor_info("tensor0", tensor1)
-    gguf_writer.write_tensor_info("tensor1", tensor2)
-    gguf_writer.write_tensor(tensor1)
-    gguf_writer.write_tensor(tensor2)
+    gguf_writer.add_tensor_info("tensor0", tensor1)
+    gguf_writer.add_tensor_info("tensor1", tensor2)
+
+    gguf_writer.write_header_to_file()
+    gguf_writer.write_kv_data_to_file()
+    gguf_writer.write_ti_data_to_file()
+    gguf_writer.write_tensor_to_file(tensor1)
+    gguf_writer.write_tensor_to_file(tensor2)

     gguf_writer.close()
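With the buffered design, the example's call order matters: every add_* call only accumulates data, and nothing reaches disk until the explicit write phases run in order — write_header_to_file, write_kv_data_to_file, write_ti_data_to_file, then the tensor payloads. A small sketch of reading the four uint32 header fields back, assuming exactly the layout the example writes:

    import struct

    # Sketch: read back the header fields written above
    # (magic, version, tensor count, kv count), assuming that exact layout.
    with open("example.gguf", "rb") as f:
        magic, version, n_tensors, n_kv = struct.unpack("<4I", f.read(16))

    # For the example above this should report 2 tensors and 4 kv entries
    # (architecture, answer, answer_in_float, and the custom alignment).
    print(f"magic=0x{magic:08x} version={version} tensors={n_tensors} kv={n_kv}")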