mirror of https://github.com/ggml-org/llama.cpp.git
	gguf.py : string len uint64_t and n_dims uint32_t
@@ -509,7 +509,7 @@ class GGUFWriter:
             self.kv_data += struct.pack("?", val)
         elif vtype == GGUFValueType.STRING:
             encoded_val = val.encode("utf8") if isinstance(val, str) else val
-            self.kv_data += struct.pack("<I", len(encoded_val))
+            self.kv_data += struct.pack("<Q", len(encoded_val))
             self.kv_data += encoded_val
         elif vtype == GGUFValueType.ARRAY:
             ltype = set([GGUFValueType.get_type(item) for item in val])
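For context, a minimal reader sketch of the string layout after this change (the helper below is hypothetical, not part of gguf.py): the length prefix is now a little-endian uint64 ("<Q") rather than a uint32 ("<I"), followed by that many UTF-8 bytes.

import struct

def read_gguf_string(buf: bytes, offset: int) -> tuple[str, int]:
    # Hypothetical helper, not from gguf.py. The length prefix is an
    # 8-byte little-endian unsigned integer ("<Q") after this commit.
    (length,) = struct.unpack_from("<Q", buf, offset)
    offset += 8
    # Payload: exactly `length` UTF-8 bytes.
    val = buf[offset:offset + length].decode("utf8")
    return val, offset + length

# Round-trip against the writer's new packing:
buf = struct.pack("<Q", len(b"llama")) + b"llama"
assert read_gguf_string(buf, 0) == ("llama", 13)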
@@ -529,10 +529,10 @@ class GGUFWriter:
         assert raw_dtype is not None or tensor_dtype in (np.float32, np.float16), "Only F32 and F16 tensors are supported for now"
 
         encoded_name = name.encode("utf8")
-        self.ti_data += struct.pack("<I", len(encoded_name))
+        self.ti_data += struct.pack("<Q", len(encoded_name))
         self.ti_data += encoded_name
         n_dims = len(tensor_shape)
-        self.ti_data += struct.pack("<Q", n_dims)
+        self.ti_data += struct.pack("<I", n_dims)
         for i in range(n_dims):
             self.ti_data += struct.pack("<Q", tensor_shape[n_dims - 1 - i])
         if raw_dtype is None:
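And a matching sketch for the tensor-info header as written after this change (again a hypothetical decoder, not part of gguf.py): the name length is uint64 ("<Q"), n_dims drops to uint32 ("<I"), and each dimension stays uint64, stored in reverse order as in the writer's loop above.

import struct

def read_tensor_header(buf: bytes, offset: int):
    # Hypothetical decoder mirroring the new writer layout.
    (name_len,) = struct.unpack_from("<Q", buf, offset)   # uint64 name length
    offset += 8
    name = buf[offset:offset + name_len].decode("utf8")
    offset += name_len
    (n_dims,) = struct.unpack_from("<I", buf, offset)     # uint32 dim count
    offset += 4
    dims = struct.unpack_from("<%dQ" % n_dims, buf, offset)  # uint64 per dim
    offset += 8 * n_dims
    # The writer emits tensor_shape reversed, so reverse back here.
    return name, tuple(reversed(dims)), offset

Note the asymmetry named in the commit title: string lengths widen to 64 bits while the dimension count narrows to 32 bits.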