Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-31 08:51:55 +00:00)
	convert.py : start a new simplified implementation by removing old stuff
Author: Georgi Gerganov

convert-new.py | 961 lines | new executable file
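At a high level, the new script is a load, convert, write pipeline: lazily load the checkpoint shards (no PyTorch dependency), read or guess the hyperparameters, map tensor names back to the original LLaMA layout (permuting the q/k projections), pick an output type, and stream everything into a GGJT file. The snippet below is a minimal sketch of that flow built only from functions defined in this file; the paths and the "spm" vocab type are placeholder assumptions, and error handling is omitted.

    from pathlib import Path

    def convert_sketch(model_dir: str, outfile: str, outtype: str = "f16") -> None:
        # lazily load .pth / .bin / .safetensors shards
        model_plus  = load_some_model(Path(model_dir))
        # tokenizer.model plus optional added_tokens.json (placeholder vocab type)
        vocab       = load_vocab(Path(model_dir), "spm")
        # from config.json, params.json, or guessed from tensor shapes
        params      = Params.load(model_plus)
        # HF Transformers names -> original LLaMA names, with q/k permutation
        model       = do_necessary_conversions(model_plus.model, params)
        output_type = pick_output_type(model, outtype)   # AllF32 or MostlyF16
        model       = convert_to_output_type(model, output_type)
        OutputFile.write_all(Path(outfile), params, output_type, model, vocab)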
							| @@ -0,0 +1,961 @@ | |||||||
|  | #!/usr/bin/env python | ||||||
|  |  | ||||||
|  | import argparse | ||||||
|  | import concurrent.futures | ||||||
|  | import copy | ||||||
|  | import enum | ||||||
|  | import faulthandler | ||||||
|  | import functools | ||||||
|  | import io | ||||||
|  | import itertools | ||||||
|  | import json | ||||||
|  | import math | ||||||
|  | import mmap | ||||||
|  | import pickle | ||||||
|  | import re | ||||||
|  | import signal | ||||||
|  | import struct | ||||||
|  | import sys | ||||||
|  | import zipfile | ||||||
|  | import numpy as np | ||||||
|  |  | ||||||
|  | from abc import ABCMeta, abstractmethod | ||||||
|  | from dataclasses import dataclass | ||||||
|  | from pathlib import Path | ||||||
|  | from typing import (IO, TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Literal, Optional, Sequence, Tuple, TypeVar, Union) | ||||||
|  | from sentencepiece import SentencePieceProcessor  # type: ignore | ||||||
|  |  | ||||||
|  | if TYPE_CHECKING: | ||||||
|  |     from typing_extensions import TypeAlias | ||||||
|  |  | ||||||
|  | if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'): | ||||||
|  |     faulthandler.register(signal.SIGUSR1) | ||||||
|  |  | ||||||
|  | NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]' | ||||||
|  |  | ||||||
|  | @dataclass(frozen=True) | ||||||
|  | class UnquantizedDataType: | ||||||
|  |     name: str | ||||||
|  |  | ||||||
|  | DT_F16  = UnquantizedDataType('F16') | ||||||
|  | DT_F32  = UnquantizedDataType('F32') | ||||||
|  | DT_I32  = UnquantizedDataType('I32') | ||||||
|  | DT_BF16 = UnquantizedDataType('BF16') | ||||||
|  |  | ||||||
|  | DataType = Union[UnquantizedDataType] | ||||||
|  |  | ||||||
|  | DATA_TYPE_TO_FTYPE: Dict[DataType, int] = { | ||||||
|  |     DT_F32: 0, | ||||||
|  |     DT_F16: 1, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | FTYPE_TO_DATA_TYPE: Dict[int, DataType] = \ | ||||||
|  |     {ftype: dtype for (dtype, ftype) in DATA_TYPE_TO_FTYPE.items()} | ||||||
|  |  | ||||||
|  | DATA_TYPE_TO_NUMPY: Dict[DataType, 'np.dtype[Any]'] = { | ||||||
|  |     DT_BF16: np.dtype(np.uint16), | ||||||
|  |     DT_F16:  np.dtype(np.float16), | ||||||
|  |     DT_F32:  np.dtype(np.float32), | ||||||
|  |     DT_I32:  np.dtype(np.int32), | ||||||
|  | } | ||||||
|  |  | ||||||
|  | NUMPY_TYPE_TO_DATA_TYPE: Dict['np.dtype[Any]', DataType] = \ | ||||||
|  |     {dtype: data_type for (data_type, dtype) in DATA_TYPE_TO_NUMPY.items()} | ||||||
|  |  | ||||||
|  | class GGMLFileType(enum.Enum): | ||||||
|  |     AllF32    = 0 | ||||||
|  |     MostlyF16 = 1  # except 1d tensors | ||||||
|  |  | ||||||
|  |     def type_for_tensor(self, name: str, tensor: 'LazyTensor') -> DataType: | ||||||
|  |         if len(tensor.shape) == 1: | ||||||
|  |             # 1D tensors are always F32. | ||||||
|  |             return DT_F32 | ||||||
|  |         elif self == GGMLFileType.AllF32: | ||||||
|  |             return DT_F32 | ||||||
|  |         elif self == GGMLFileType.MostlyF16: | ||||||
|  |             return DT_F16 | ||||||
|  |         else: | ||||||
|  |             raise ValueError(self) | ||||||
|  |  | ||||||
|  | # TODO: this is LLaMA specific | ||||||
|  | def make_tensors_list() -> List[str]: | ||||||
|  |     ret = [ | ||||||
|  |         'tok_embeddings.weight', | ||||||
|  |         'norm.weight', | ||||||
|  |         'output.weight', | ||||||
|  |     ] | ||||||
|  |     for i in range(80):  # maximum number of layers | ||||||
|  |         ret += [ | ||||||
|  |             f'layers.{i}.attention.wq.weight', | ||||||
|  |             f'layers.{i}.attention.wk.weight', | ||||||
|  |             f'layers.{i}.attention.wv.weight', | ||||||
|  |             f'layers.{i}.attention.wo.weight', | ||||||
|  |             f'layers.{i}.attention_norm.weight', | ||||||
|  |             f'layers.{i}.feed_forward.w1.weight', | ||||||
|  |             f'layers.{i}.feed_forward.w2.weight', | ||||||
|  |             f'layers.{i}.feed_forward.w3.weight', | ||||||
|  |             f'layers.{i}.ffn_norm.weight', | ||||||
|  |         ] | ||||||
|  |     return ret | ||||||
|  |  | ||||||
|  | # TODO: this should be generalized for non-LLaMA models | ||||||
|  | TENSORS_LIST = make_tensors_list() | ||||||
|  | TENSORS_SET = set(TENSORS_LIST) | ||||||
|  |  | ||||||
|  | def find_n_mult(n_ff: int, n_embd: int) -> int: | ||||||
|  |     # hardcoded magic range | ||||||
|  |     for n_mult in range(256, 1, -1): | ||||||
|  |         calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult | ||||||
|  |         if calc_ff == n_ff: | ||||||
|  |             return n_mult | ||||||
|  |     raise Exception(f"failed to find n_mult for (n_ff={n_ff}, n_embd={n_embd}).") | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @dataclass | ||||||
|  | class Params: | ||||||
|  |     n_vocab: int | ||||||
|  |     n_embd:  int | ||||||
|  |     n_mult:  int | ||||||
|  |     n_head:  int | ||||||
|  |     n_layer: int | ||||||
|  |  | ||||||
|  |     @staticmethod | ||||||
|  |     def guessed(model: 'LazyModel') -> 'Params': | ||||||
|  |         # try transformer naming first | ||||||
|  |         n_vocab, n_embd = model["model.embed_tokens.weight"].shape if "model.embed_tokens.weight" in model else model["tok_embeddings.weight"].shape | ||||||
|  |  | ||||||
|  |         # try transformer naming first | ||||||
|  |         if "model.layers.0.self_attn.q_proj.weight" in model: | ||||||
|  |             n_layer=next(i for i in itertools.count() if f"model.layers.{i}.self_attn.q_proj.weight" not in model) | ||||||
|  |         elif "model.layers.0.self_attn.W_pack.weight" in model:   # next: try baichuan naming | ||||||
|  |             n_layer=next(i for i in itertools.count() if f"model.layers.{i}.self_attn.W_pack.weight" not in model) | ||||||
|  |         else: | ||||||
|  |             n_layer=next(i for i in itertools.count() if f"layers.{i}.attention.wq.weight" not in model) | ||||||
|  |  | ||||||
|  |         if n_layer < 1: | ||||||
|  |             raise Exception("failed to guess 'n_layer'. This model is unknown or unsupported.\n" | ||||||
|  |                             "Suggestion: provide 'config.json' of the model in the same directory containing model files.") | ||||||
|  |  | ||||||
|  |         n_head=n_embd // 128 # guessed | ||||||
|  |  | ||||||
|  |         return Params( | ||||||
|  |             n_vocab = n_vocab, | ||||||
|  |             n_embd  = n_embd, | ||||||
|  |             n_mult  = 256, | ||||||
|  |             n_head  = n_head, | ||||||
|  |             n_layer = n_layer, | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |     @staticmethod | ||||||
|  |     def loadHFTransformerJson(model: 'LazyModel', config_path: 'Path') -> 'Params': | ||||||
|  |         config = json.load(open(config_path)) | ||||||
|  |  | ||||||
|  |         n_vocab = config["vocab_size"]; | ||||||
|  |         n_embd  = config["hidden_size"]; | ||||||
|  |         n_head  = config["num_attention_heads"]; | ||||||
|  |         n_layer = config["num_hidden_layers"]; | ||||||
|  |         n_ff    = config["intermediate_size"]; | ||||||
|  |  | ||||||
|  |         n_mult = find_n_mult(n_ff, n_embd); | ||||||
|  |  | ||||||
|  |         return Params( | ||||||
|  |             n_vocab = n_vocab, | ||||||
|  |             n_embd  = n_embd, | ||||||
|  |             n_mult  = n_mult, | ||||||
|  |             n_head  = n_head, | ||||||
|  |             n_layer = n_layer, | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |     # LLaMA v2 70B params.json | ||||||
|  |     # {"dim": 8192, "multiple_of": 4096, "ffn_dim_multiplier": 1.3, "n_heads": 64, "n_kv_heads": 8, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1 | ||||||
|  |     @staticmethod | ||||||
|  |     def loadOriginalParamsJson(model: 'LazyModel', config_path: 'Path') -> 'Params': | ||||||
|  |         config = json.load(open(config_path)) | ||||||
|  |  | ||||||
|  |         n_vocab = config["vocab_size"]; | ||||||
|  |         n_embd  = config["dim"]; | ||||||
|  |         n_head  = config["n_heads"]; | ||||||
|  |         n_layer = config["n_layers"]; | ||||||
|  |         n_mult  = config["multiple_of"]; | ||||||
|  |  | ||||||
|  |         if n_vocab == -1: | ||||||
|  |             n_vocab = model["tok_embeddings.weight"].shape[0] | ||||||
|  |  | ||||||
|  |         return Params( | ||||||
|  |             n_vocab = n_vocab, | ||||||
|  |             n_embd  = n_embd, | ||||||
|  |             n_mult  = n_mult, | ||||||
|  |             n_head  = n_head, | ||||||
|  |             n_layer = n_layer, | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |     @staticmethod | ||||||
|  |     def load(model_plus: 'ModelPlus') -> 'Params': | ||||||
|  |         hf_config_path   = model_plus.paths[0].parent / "config.json" | ||||||
|  |         orig_config_path = model_plus.paths[0].parent / "params.json" | ||||||
|  |  | ||||||
|  |         if hf_config_path.exists(): | ||||||
|  |             params = Params.loadHFTransformerJson(model_plus.model, hf_config_path) | ||||||
|  |         elif orig_config_path.exists(): | ||||||
|  |             params = Params.loadOriginalParamsJson(model_plus.model, orig_config_path) | ||||||
|  |         else: | ||||||
|  |             params = Params.guessed(model_plus.model) | ||||||
|  |  | ||||||
|  |         print(f'params: n_vocab:{params.n_vocab} n_embd:{params.n_embd} n_mult:{params.n_mult} n_head:{params.n_head} n_layer:{params.n_layer}') | ||||||
|  |         return params | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class SentencePieceVocab: | ||||||
|  |     def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path], vocabtype: Optional[str]) -> None: | ||||||
|  |         self.vocabtype = vocabtype | ||||||
|  |         if self.vocabtype == "bpe": | ||||||
|  |             self.sentencepiece_tokenizer = json.loads(open(str(fname_tokenizer)).read()) | ||||||
|  |         else: | ||||||
|  |             self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer)) | ||||||
|  |  | ||||||
|  |         added_tokens: Dict[str, int] | ||||||
|  |         if fname_added_tokens is not None: | ||||||
|  |             added_tokens = json.load(open(fname_added_tokens)) | ||||||
|  |         else: | ||||||
|  |             added_tokens = {} | ||||||
|  |  | ||||||
|  |         if self.vocabtype == "bpe": | ||||||
|  |             vocab_size: int = len(self.sentencepiece_tokenizer) | ||||||
|  |         else: | ||||||
|  |             vocab_size: int = self.sentencepiece_tokenizer.vocab_size() | ||||||
|  |  | ||||||
|  |         expected_ids = list(range(vocab_size, vocab_size + len(added_tokens))) | ||||||
|  |         actual_ids   = sorted(added_tokens.values()) | ||||||
|  |         if expected_ids != actual_ids: | ||||||
|  |             raise Exception(f"Expected added token IDs to be sequential and start at {len(added_tokens)}; got {actual_ids}") | ||||||
|  |  | ||||||
|  |         items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1]) | ||||||
|  |         self.added_tokens_list = [text for (text, idx) in items] | ||||||
|  |         self.vocab_size_base: int = vocab_size | ||||||
|  |         self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list) | ||||||
|  |         self.fname_tokenizer = fname_tokenizer | ||||||
|  |         self.fname_added_tokens = fname_added_tokens | ||||||
|  |  | ||||||
|  |     def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]: | ||||||
|  |         tokenizer = self.sentencepiece_tokenizer | ||||||
|  |         if self.vocabtype == "bpe": | ||||||
|  |             from transformers.models.gpt2 import tokenization_gpt2 | ||||||
|  |             byte_encoder = tokenization_gpt2.bytes_to_unicode() | ||||||
|  |             byte_decoder = {v: k for k, v in byte_encoder.items()} | ||||||
|  |             for i, item in enumerate(tokenizer): | ||||||
|  |                 text: bytes | ||||||
|  |                 text = b''.join([x.to_bytes(1, byteorder='big') for x in [byte_decoder[y] for y in item]]) | ||||||
|  |                 score: float = -i | ||||||
|  |                 yield text, score | ||||||
|  |         else: | ||||||
|  |             for i in range(tokenizer.vocab_size()): | ||||||
|  |                 text: bytes | ||||||
|  |                 if tokenizer.is_unknown(i): | ||||||
|  |                     text = " \u2047 ".encode("utf-8") | ||||||
|  |                 elif tokenizer.is_control(i): | ||||||
|  |                     text = b"" | ||||||
|  |                 elif tokenizer.is_byte(i): | ||||||
|  |                     piece = tokenizer.id_to_piece(i) | ||||||
|  |                     if len(piece) != 6: | ||||||
|  |                         raise Exception(f"Invalid token: {piece}") | ||||||
|  |                     byte_value = int(piece[3:-1], 16) | ||||||
|  |                     text = struct.pack("B", byte_value) | ||||||
|  |                 else: | ||||||
|  |                     text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8") | ||||||
|  |                 score: float = tokenizer.get_score(i) | ||||||
|  |                 yield text, score | ||||||
|  |  | ||||||
|  |     def added_tokens(self) -> Iterable[Tuple[bytes, float]]: | ||||||
|  |         for text in self.added_tokens_list: | ||||||
|  |             score = -1000.0 | ||||||
|  |             yield text.encode("utf-8"), score | ||||||
|  |  | ||||||
|  |     def all_tokens(self) -> Iterable[Tuple[bytes, float]]: | ||||||
|  |         yield from self.sentencepiece_tokens() | ||||||
|  |         yield from self.added_tokens() | ||||||
|  |  | ||||||
|  |     def __repr__(self) -> str: | ||||||
|  |         return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>" | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class GGMLVocab: | ||||||
|  |     def __init__(self, tokens: List[Tuple[bytes, float]]): | ||||||
|  |         self.tokens = tokens | ||||||
|  |         self.vocab_size = len(tokens) | ||||||
|  |  | ||||||
|  |     def all_tokens(self) -> Iterable[Tuple[bytes, float]]: | ||||||
|  |         return self.tokens | ||||||
|  |  | ||||||
|  |     def __repr__(self) -> str: | ||||||
|  |         return f"<GGMLVocab with {self.vocab_size} tokens>" | ||||||
|  |  | ||||||
|  |  | ||||||
|  | Vocab = Union[SentencePieceVocab, GGMLVocab] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def permute(weights: NDArray, n_head: int) -> NDArray: | ||||||
|  |     return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) | ||||||
|  |                    .swapaxes(1, 2) | ||||||
|  |                    .reshape(weights.shape)) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class Tensor(metaclass=ABCMeta): | ||||||
|  |     data_type: DataType | ||||||
|  |  | ||||||
|  |     @abstractmethod | ||||||
|  |     def astype(self, data_type: DataType) -> 'Tensor': ... | ||||||
|  |     @abstractmethod | ||||||
|  |     def permute(self, n_head: int) -> 'Tensor': ... | ||||||
|  |     @abstractmethod | ||||||
|  |     def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor': ... | ||||||
|  |     @abstractmethod | ||||||
|  |     def part(self, n_part: int) -> 'UnquantizedTensor': ... | ||||||
|  |     @abstractmethod | ||||||
|  |     def to_ggml(self) -> 'GGMLCompatibleTensor': ... | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def bf16_to_fp32(bf16_arr: np.ndarray) -> np.ndarray: | ||||||
|  |     assert bf16_arr.dtype == np.uint16, f"Input array should be of dtype uint16, but got {bf16_arr.dtype}" | ||||||
|  |     fp32_arr = bf16_arr.astype(np.uint32) << 16 | ||||||
|  |     return fp32_arr.view(np.float32) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class UnquantizedTensor(Tensor): | ||||||
|  |     def __init__(self, ndarray: NDArray) -> None: | ||||||
|  |         assert isinstance(ndarray, np.ndarray) | ||||||
|  |         self.ndarray = ndarray | ||||||
|  |         self.data_type = NUMPY_TYPE_TO_DATA_TYPE[ndarray.dtype] | ||||||
|  |  | ||||||
|  |     def astype(self, data_type: DataType) -> Tensor: | ||||||
|  |         dtype = DATA_TYPE_TO_NUMPY[data_type] | ||||||
|  |         if self.data_type == DT_BF16: | ||||||
|  |             self.ndarray = bf16_to_fp32(self.ndarray) | ||||||
|  |         return UnquantizedTensor(self.ndarray.astype(dtype)) | ||||||
|  |  | ||||||
|  |     def to_ggml(self) -> 'UnquantizedTensor': | ||||||
|  |         return self | ||||||
|  |  | ||||||
|  |     def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor': | ||||||
|  |         r = self.ndarray.shape[0] // 3 | ||||||
|  |         return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head)) | ||||||
|  |  | ||||||
|  |     def part(self, n_part: int) -> 'UnquantizedTensor': | ||||||
|  |         r = self.ndarray.shape[0] // 3 | ||||||
|  |         return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...]) | ||||||
|  |  | ||||||
|  |     def permute(self, n_head: int) -> 'UnquantizedTensor': | ||||||
|  |         return UnquantizedTensor(permute(self.ndarray, n_head)) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def load_unquantized(lazy_tensor: 'LazyTensor', expected_dtype: Any = None, convert: bool = False) -> NDArray: | ||||||
|  |     tensor = lazy_tensor.load() | ||||||
|  |     assert isinstance(tensor, UnquantizedTensor) | ||||||
|  |  | ||||||
|  |     # double-check: | ||||||
|  |     actual_shape = list(tensor.ndarray.shape) | ||||||
|  |     assert actual_shape == lazy_tensor.shape, (actual_shape, lazy_tensor.shape) | ||||||
|  |     if expected_dtype is not None and expected_dtype != tensor.ndarray.dtype: | ||||||
|  |         if convert: | ||||||
|  |             tensor.ndarray = tensor.ndarray.astype(expected_dtype) | ||||||
|  |         else: | ||||||
|  |             raise ValueError(f'expected this tensor to have dtype {expected_dtype}, got {tensor.ndarray.dtype}') | ||||||
|  |  | ||||||
|  |     return tensor.ndarray | ||||||
|  |  | ||||||
|  |  | ||||||
|  | GGMLCompatibleTensor = Union[UnquantizedTensor] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class DeferredPermutedTensor(Tensor): | ||||||
|  |     def __init__(self, base: Tensor, n_head: int) -> None: | ||||||
|  |         self.base = base | ||||||
|  |         self.n_head = n_head | ||||||
|  |         self.data_type = self.base.data_type | ||||||
|  |  | ||||||
|  |     def astype(self, data_type: DataType) -> Tensor: | ||||||
|  |         return self.base.astype(data_type).permute(self.n_head) | ||||||
|  |  | ||||||
|  |     def to_ggml(self) -> GGMLCompatibleTensor: | ||||||
|  |         return self.base.to_ggml().permute(self.n_head) | ||||||
|  |  | ||||||
|  |     def permute(self, n_head: int) -> Tensor: | ||||||
|  |         raise Exception("shouldn't permute twice") | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @dataclass | ||||||
|  | class LazyTensor: | ||||||
|  |     _load: Callable[[], Tensor] | ||||||
|  |     shape: List[int] | ||||||
|  |     data_type: DataType | ||||||
|  |     description: str | ||||||
|  |  | ||||||
|  |     def load(self) -> Tensor: | ||||||
|  |         ret = self._load() | ||||||
|  |         assert ret.data_type == self.data_type, (self.data_type, ret.data_type, self.description) | ||||||
|  |         return ret | ||||||
|  |  | ||||||
|  |     def astype(self, data_type: DataType) -> 'LazyTensor': | ||||||
|  |         self.validate_conversion_to(data_type) | ||||||
|  |  | ||||||
|  |         def load() -> Tensor: | ||||||
|  |             return self.load().astype(data_type) | ||||||
|  |         return LazyTensor(load, self.shape, data_type, f'convert({data_type}) {self.description}') | ||||||
|  |  | ||||||
|  |     def validate_conversion_to(self, data_type: DataType) -> None: | ||||||
|  |         if data_type == self.data_type: | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |  | ||||||
|  | LazyModel = Dict[str, LazyTensor] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @dataclass | ||||||
|  | class ModelPlus: | ||||||
|  |     model: LazyModel | ||||||
|  |     paths: List[Path]  # Where this was read from. | ||||||
|  |     format: Literal['ggml', 'torch', 'safetensors'] | ||||||
|  |     vocab: Optional[Vocab]  # For GGML models (which have vocab built in), the vocab. | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def merge_sharded(models: List[LazyModel]) -> LazyModel: | ||||||
|  |     # Original LLaMA models have each file contain one part of each tensor. | ||||||
|  |     # Use a dict instead of a set to preserve order. | ||||||
|  |     names = {name: None for model in models for name in model} | ||||||
|  |  | ||||||
|  |     def convert(name: str) -> LazyTensor: | ||||||
|  |         lazy_tensors: List[LazyTensor] = [model[name] for model in models] | ||||||
|  |         if len(lazy_tensors) == 1: | ||||||
|  |             # only one file; don't go through this procedure since there might | ||||||
|  |             # be quantized tensors | ||||||
|  |             return lazy_tensors[0] | ||||||
|  |         if len(lazy_tensors[0].shape) == 1: | ||||||
|  |             # the tensor is just duplicated in every file | ||||||
|  |             return lazy_tensors[0] | ||||||
|  |         if name.startswith('tok_embeddings.') or \ | ||||||
|  |            name.endswith('.attention.wo.weight') or \ | ||||||
|  |            name.endswith('.feed_forward.w2.weight'): | ||||||
|  |             # split by columns | ||||||
|  |             axis = 1 | ||||||
|  |         else: | ||||||
|  |             # split by rows | ||||||
|  |             axis = 0 | ||||||
|  |         concatenated_shape = list(lazy_tensors[0].shape) | ||||||
|  |         concatenated_shape[axis] = sum(tensor.shape[axis] for tensor in lazy_tensors) | ||||||
|  |  | ||||||
|  |         def load() -> UnquantizedTensor: | ||||||
|  |             ndarrays = [load_unquantized(tensor) for tensor in lazy_tensors] | ||||||
|  |             concatenated: NDArray = np.concatenate(ndarrays, axis=axis) | ||||||
|  |             return UnquantizedTensor(concatenated) | ||||||
|  |         description = 'concatenated[[' + '] | ['.join(lt.description for lt in lazy_tensors) + ']]' | ||||||
|  |         return LazyTensor(load, concatenated_shape, lazy_tensors[0].data_type, description) | ||||||
|  |     return {name: convert(name) for name in names} | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def merge_multifile_models(models_plus: List[ModelPlus]) -> ModelPlus: | ||||||
|  |     formats = set(mp.format for mp in models_plus) | ||||||
|  |     assert len(formats) == 1, "different formats?" | ||||||
|  |     format = formats.pop() | ||||||
|  |     paths = [path for mp in models_plus for path in mp.paths] | ||||||
|  |     # Use the first non-None vocab, if any. | ||||||
|  |     try: | ||||||
|  |         vocab = next(mp.vocab for mp in models_plus if mp.vocab is not None) | ||||||
|  |     except StopIteration: | ||||||
|  |         vocab = None | ||||||
|  |  | ||||||
|  |     if any("model.embed_tokens.weight" in mp.model for mp in models_plus): | ||||||
|  |         # Transformers models put different tensors in different files, but | ||||||
|  |         # don't split individual tensors between files. | ||||||
|  |         model: LazyModel = {} | ||||||
|  |         for mp in models_plus: | ||||||
|  |             model.update(mp.model) | ||||||
|  |     else: | ||||||
|  |         model = merge_sharded([mp.model for mp in models_plus]) | ||||||
|  |  | ||||||
|  |     return ModelPlus(model, paths, format, vocab) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def permute_lazy(lazy_tensor: LazyTensor, n_head: int) -> LazyTensor: | ||||||
|  |     def load() -> Tensor: | ||||||
|  |         return lazy_tensor.load().permute(n_head) | ||||||
|  |     return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description) | ||||||
|  |  | ||||||
|  | def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int) -> LazyTensor: | ||||||
|  |     def load() -> Tensor: | ||||||
|  |         return lazy_tensor.load().permute_part(n_part, n_head) | ||||||
|  |     s = lazy_tensor.shape.copy() | ||||||
|  |     s[0] = s[0] // 3 | ||||||
|  |     return LazyTensor(load, s, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description) | ||||||
|  |  | ||||||
|  | def part_lazy(lazy_tensor: LazyTensor, n_part: int) -> LazyTensor: | ||||||
|  |     def load() -> Tensor: | ||||||
|  |         return lazy_tensor.load().part(n_part) | ||||||
|  |     s = lazy_tensor.shape.copy() | ||||||
|  |     s[0] = s[0] // 3 | ||||||
|  |     return LazyTensor(load, s, lazy_tensor.data_type, 'part ' + lazy_tensor.description) | ||||||
|  |  | ||||||
|  | def convert_transformers_to_orig(model: LazyModel, params: Params) -> LazyModel: | ||||||
|  |     out: LazyModel = {} | ||||||
|  |     out["tok_embeddings.weight"] = model["model.embed_tokens.weight"] | ||||||
|  |     out["norm.weight"]           = model["model.norm.weight"] | ||||||
|  |     out["output.weight"]         = model["lm_head.weight"] | ||||||
|  |  | ||||||
|  |     for i in itertools.count(): | ||||||
|  |         if f"model.layers.{i}.self_attn.q_proj.weight" in model: | ||||||
|  |             out[f"layers.{i}.attention.wq.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head) | ||||||
|  |             out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head) | ||||||
|  |             out[f"layers.{i}.attention.wv.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"] | ||||||
|  |         elif f"model.layers.{i}.self_attn.W_pack.weight" in model: | ||||||
|  |             out[f"layers.{i}.attention.wq.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head) | ||||||
|  |             out[f"layers.{i}.attention.wk.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head) | ||||||
|  |             out[f"layers.{i}.attention.wv.weight"] = part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 2) | ||||||
|  |         else: | ||||||
|  |             break | ||||||
|  |  | ||||||
|  |         out[f"layers.{i}.attention.wo.weight"] = model[f"model.layers.{i}.self_attn.o_proj.weight"] | ||||||
|  |  | ||||||
|  |         out[f"layers.{i}.feed_forward.w1.weight"] = model[f"model.layers.{i}.mlp.gate_proj.weight"] | ||||||
|  |         out[f"layers.{i}.feed_forward.w2.weight"] = model[f"model.layers.{i}.mlp.down_proj.weight"] | ||||||
|  |         out[f"layers.{i}.feed_forward.w3.weight"] = model[f"model.layers.{i}.mlp.up_proj.weight"] | ||||||
|  |  | ||||||
|  |         out[f"layers.{i}.attention_norm.weight"] = model[f"model.layers.{i}.input_layernorm.weight"] | ||||||
|  |         out[f"layers.{i}.ffn_norm.weight"]       = model[f"model.layers.{i}.post_attention_layernorm.weight"] | ||||||
|  |     return out | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # Functionality that simulates `torch.load` but where individual tensors are | ||||||
|  | # only loaded into memory on demand, not all at once. | ||||||
|  | # PyTorch can't do this natively as of time of writing: | ||||||
|  | # - https://github.com/pytorch/pytorch/issues/64327 | ||||||
|  | # This allows us to de-shard without multiplying RAM usage, and also | ||||||
|  | # conveniently drops the PyTorch dependency (though we still need numpy). | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @dataclass | ||||||
|  | class LazyStorageKind: | ||||||
|  |     data_type: DataType | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @dataclass | ||||||
|  | class LazyStorage: | ||||||
|  |     load: Callable[[int, int], NDArray] | ||||||
|  |     kind: LazyStorageKind | ||||||
|  |     description: str | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class LazyUnpickler(pickle.Unpickler): | ||||||
|  |     def __init__(self, fp: IO[bytes], data_base_path: str, zip_file: zipfile.ZipFile): | ||||||
|  |         super().__init__(fp) | ||||||
|  |         self.data_base_path = data_base_path | ||||||
|  |         self.zip_file = zip_file | ||||||
|  |  | ||||||
|  |     def persistent_load(self, pid: Any) -> Any: | ||||||
|  |         assert pid[0] == 'storage' | ||||||
|  |         assert isinstance(pid[1], LazyStorageKind) | ||||||
|  |         data_type = pid[1].data_type | ||||||
|  |         filename_stem = pid[2] | ||||||
|  |         filename = self.data_base_path + '/' + filename_stem | ||||||
|  |         info = self.zip_file.getinfo(filename) | ||||||
|  |  | ||||||
|  |         def load(offset: int, elm_count: int) -> NDArray: | ||||||
|  |             dtype = DATA_TYPE_TO_NUMPY.get(data_type) | ||||||
|  |             if dtype is None: | ||||||
|  |                 raise Exception("tensor stored in unsupported format") | ||||||
|  |             fp = self.zip_file.open(info) | ||||||
|  |             fp.seek(offset * dtype.itemsize) | ||||||
|  |             size = elm_count * dtype.itemsize | ||||||
|  |             data = fp.read(size) | ||||||
|  |             assert len(data) == size | ||||||
|  |             return np.frombuffer(data, dtype) | ||||||
|  |         description = f'storage data_type={data_type} path-in-zip={filename} path={self.zip_file.filename}' | ||||||
|  |         return LazyStorage(load=load, kind=pid[1], description=description) | ||||||
|  |  | ||||||
|  |     # @staticmethod | ||||||
|  |     def lazy_rebuild_tensor_v2(storage: Any, storage_offset: Any, size: Any, stride: Any, | ||||||
|  |                                # pyright: ignore[reportSelfClsParameterName] | ||||||
|  |                                requires_grad: Any, backward_hooks: Any, metadata: Any = None) -> LazyTensor: | ||||||
|  |         assert isinstance(storage, LazyStorage) | ||||||
|  |  | ||||||
|  |         def load() -> UnquantizedTensor: | ||||||
|  |             elm_count = stride[0] * size[0] | ||||||
|  |             return UnquantizedTensor(storage.load(storage_offset, elm_count).reshape(size)) | ||||||
|  |         description = f'pickled storage_offset={storage_offset} in {storage.description}' | ||||||
|  |         return LazyTensor(load, list(size), storage.kind.data_type, description) | ||||||
|  |  | ||||||
|  |     # @staticmethod | ||||||
|  |     def rebuild_from_type_v2(func, new_type, args, state): | ||||||
|  |         return func(*args) | ||||||
|  |  | ||||||
|  |     CLASSES: Dict[Any, Any] = { | ||||||
|  |         ('torch._tensor', '_rebuild_from_type_v2'): rebuild_from_type_v2, | ||||||
|  |         ('torch._utils', '_rebuild_tensor_v2'): lazy_rebuild_tensor_v2, | ||||||
|  |         ('torch', 'BFloat16Storage'): LazyStorageKind(DT_BF16), | ||||||
|  |         ('torch', 'HalfStorage'): LazyStorageKind(DT_F16), | ||||||
|  |         ('torch', 'FloatStorage'): LazyStorageKind(DT_F32), | ||||||
|  |         ('torch', 'IntStorage'): LazyStorageKind(DT_I32), | ||||||
|  |         ('torch', 'Tensor'): LazyTensor, | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     def find_class(self, module: str, name: str) -> Any: | ||||||
|  |         if not module.startswith('torch'): | ||||||
|  |             return super().find_class(module, name) | ||||||
|  |         return self.CLASSES[(module, name)] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def lazy_load_torch_file(outer_fp: IO[bytes], path: Path) -> ModelPlus: | ||||||
|  |     zf = zipfile.ZipFile(outer_fp) | ||||||
|  |     pickle_paths = [name for name in zf.namelist() if name.endswith('.pkl')] | ||||||
|  |     assert len(pickle_paths) == 1, pickle_paths | ||||||
|  |     pickle_fp = zf.open(pickle_paths[0], 'r') | ||||||
|  |     unpickler = LazyUnpickler(pickle_fp, | ||||||
|  |                               data_base_path=pickle_paths[0][:-4], | ||||||
|  |                               zip_file=zf) | ||||||
|  |     model = unpickler.load() | ||||||
|  |     as_dict = dict(model.items()) | ||||||
|  |     return ModelPlus(model=as_dict, paths=[path], format='torch', vocab=None) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | SAFETENSORS_DATA_TYPES: Dict[str, DataType] = { | ||||||
|  |     'BF16': DT_BF16, | ||||||
|  |     'F16': DT_F16, | ||||||
|  |     'F32': DT_F32, | ||||||
|  |     'I32': DT_I32, | ||||||
|  | } | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def lazy_load_safetensors_file(fp: IO[bytes], path: Path) -> ModelPlus: | ||||||
|  |     header_size, = struct.unpack('<Q', fp.read(8)) | ||||||
|  |     header: Dict[str, Dict[str, Any]] = json.loads(fp.read(header_size)) | ||||||
|  |     # Use mmap for the actual data to avoid race conditions with the file offset. | ||||||
|  |     mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)) | ||||||
|  |     byte_buf = mapped[8 + header_size:] | ||||||
|  |  | ||||||
|  |     def convert(info: Dict[str, Any]) -> LazyTensor: | ||||||
|  |         data_type = SAFETENSORS_DATA_TYPES[info['dtype']] | ||||||
|  |         numpy_dtype = DATA_TYPE_TO_NUMPY[data_type] | ||||||
|  |         shape: List[int] = info['shape'] | ||||||
|  |         begin, end = info['data_offsets'] | ||||||
|  |         assert 0 <= begin <= end <= len(byte_buf) | ||||||
|  |         assert end - begin == math.prod(shape) * numpy_dtype.itemsize | ||||||
|  |         buf = byte_buf[begin:end] | ||||||
|  |  | ||||||
|  |         def load() -> UnquantizedTensor: | ||||||
|  |             return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape)) | ||||||
|  |         description = f'safetensors begin={begin} end={end} type={data_type} path={path}' | ||||||
|  |         return LazyTensor(load, shape, data_type, description) | ||||||
|  |     model = {name: convert(info) for (name, info) in header.items() if name != '__metadata__'} | ||||||
|  |     return ModelPlus(model=model, paths=[path], format='safetensors', vocab=None) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def must_read(fp: IO[bytes], length: int) -> bytes: | ||||||
|  |     ret = fp.read(length) | ||||||
|  |     if len(ret) < length: | ||||||
|  |         raise Exception("unexpectedly reached end of file") | ||||||
|  |     return ret | ||||||
|  |  | ||||||
|  |  | ||||||
|  | @functools.lru_cache(maxsize=None) | ||||||
|  | def lazy_load_file(path: Path) -> ModelPlus: | ||||||
|  |     fp = open(path, 'rb') | ||||||
|  |     first8 = fp.read(8) | ||||||
|  |     fp.seek(0) | ||||||
|  |     if first8[:2] == b'PK': | ||||||
|  |         # A zip file, i.e. PyTorch format | ||||||
|  |         return lazy_load_torch_file(fp, path) | ||||||
|  |     elif struct.unpack('<Q', first8)[0] < 16 * 1024 * 1024: | ||||||
|  |         # Probably safetensors | ||||||
|  |         return lazy_load_safetensors_file(fp, path) | ||||||
|  |     else: | ||||||
|  |         raise ValueError(f"unknown format: {path}") | ||||||
|  |  | ||||||
|  |  | ||||||
|  | In = TypeVar('In') | ||||||
|  | Out = TypeVar('Out') | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], concurrency: int) -> Iterable[Out]: | ||||||
|  |     '''Parallel map, but with backpressure.  If the caller doesn't call `next` | ||||||
|  |     fast enough, this will stop calling `func` at some point rather than | ||||||
|  |     letting results pile up in memory.  Specifically, there is a max of one | ||||||
|  |     output value buffered per thread.''' | ||||||
|  |     with concurrent.futures.ThreadPoolExecutor() as executor: | ||||||
|  |         futures: List[concurrent.futures.Future[Out]] = [] | ||||||
|  |         items_rev = list(iterable)[::-1] | ||||||
|  |         for i in range(min(concurrency, len(items_rev))): | ||||||
|  |             futures.append(executor.submit(func, items_rev.pop())) | ||||||
|  |         while futures: | ||||||
|  |             result = futures.pop(0).result() | ||||||
|  |             if items_rev: | ||||||
|  |                 futures.append(executor.submit(func, items_rev.pop())) | ||||||
|  |             yield result | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def check_vocab_size(params: Params, vocab: Vocab) -> None: | ||||||
|  |     if params.n_vocab != vocab.vocab_size: | ||||||
|  |         # GGMLVocab comes from the same file as the model so shouldn't mismatch: | ||||||
|  |         assert isinstance(vocab, SentencePieceVocab) | ||||||
|  |         if params.n_vocab == vocab.vocab_size_base: | ||||||
|  |             print("Ignoring added_tokens.json since model matches vocab size without it.") | ||||||
|  |             vocab.added_tokens_list = [] | ||||||
|  |             vocab.vocab_size = vocab.vocab_size_base | ||||||
|  |             return | ||||||
|  |         msg = f"Vocab size mismatch (model has {params.n_vocab}, but {vocab.fname_tokenizer}" | ||||||
|  |         if vocab.fname_added_tokens is not None: | ||||||
|  |             msg += f" combined with {vocab.fname_added_tokens}" | ||||||
|  |         msg += f" has {vocab.vocab_size})." | ||||||
|  |         if vocab.vocab_size < params.n_vocab < vocab.vocab_size + 20 and vocab.fname_added_tokens is None: | ||||||
|  |             msg += f"  Most likely you are missing added_tokens.json (should be in {vocab.fname_tokenizer.parent})." | ||||||
|  |         raise Exception(msg) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class OutputFile: | ||||||
|  |     def __init__(self, fname_out: Path) -> None: | ||||||
|  |         self.fout = open(fname_out, "wb") | ||||||
|  |  | ||||||
|  |     def write_file_header(self, params: Params, file_type: GGMLFileType) -> None: | ||||||
|  |         self.fout.write(b"ggjt"[::-1])  # magic | ||||||
|  |         values = [ | ||||||
|  |             1,  # file version | ||||||
|  |             params.n_vocab, | ||||||
|  |             params.n_embd, | ||||||
|  |             params.n_mult, | ||||||
|  |             params.n_head, | ||||||
|  |             params.n_layer, | ||||||
|  |             params.n_embd // params.n_head,  # rot (obsolete) | ||||||
|  |             file_type.value, | ||||||
|  |         ] | ||||||
|  |         self.fout.write(struct.pack("i" * len(values), *values)) | ||||||
|  |  | ||||||
|  |     def write_tensor_header(self, name: str, shape: Sequence[int], data_type: DataType) -> None: | ||||||
|  |         sname = name.encode('utf-8') | ||||||
|  |         self.fout.write(struct.pack("iii", len(shape), len(sname), DATA_TYPE_TO_FTYPE[data_type])) | ||||||
|  |         self.fout.write(struct.pack("i" * len(shape), *shape[::-1])) | ||||||
|  |         self.fout.write(sname) | ||||||
|  |         self.fout.seek((self.fout.tell() + 31) & -32) | ||||||
|  |  | ||||||
|  |     def write_vocab(self, vocab: Vocab) -> None: | ||||||
|  |         for text, score in vocab.all_tokens(): | ||||||
|  |             self.fout.write(struct.pack("i", len(text))) | ||||||
|  |             self.fout.write(text) | ||||||
|  |             self.fout.write(struct.pack("f", score)) | ||||||
|  |  | ||||||
|  |     @staticmethod | ||||||
|  |     def write_vocab_only(fname_out: Path, vocab: Vocab) -> None: | ||||||
|  |         params = Params(n_vocab=vocab.vocab_size, n_embd=0, n_mult=0, n_head=1, n_layer=0) | ||||||
|  |         of = OutputFile(fname_out) | ||||||
|  |         of.write_file_header(params, file_type=GGMLFileType.AllF32) | ||||||
|  |         of.write_vocab(vocab) | ||||||
|  |         of.fout.close() | ||||||
|  |  | ||||||
|  |     @staticmethod | ||||||
|  |     def write_all(fname_out: Path, params: Params, file_type: GGMLFileType, model: LazyModel, vocab: Vocab) -> None: | ||||||
|  |         check_vocab_size(params, vocab) | ||||||
|  |         of = OutputFile(fname_out) | ||||||
|  |         of.write_file_header(params, file_type) | ||||||
|  |         print("Writing vocab...") | ||||||
|  |         of.write_vocab(vocab) | ||||||
|  |  | ||||||
|  |         def do_item(item: Tuple[str, LazyTensor]) -> NDArray: | ||||||
|  |             name, lazy_tensor = item | ||||||
|  |             return lazy_tensor.load().to_ggml().ndarray | ||||||
|  |  | ||||||
|  |         ndarrays = bounded_parallel_map(do_item, model.items(), concurrency=8) | ||||||
|  |         for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)): | ||||||
|  |             size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape) | ||||||
|  |             padi = len(str(len(model))) | ||||||
|  |             print(f"[{i+1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type}") | ||||||
|  |             of.write_tensor_header(name, lazy_tensor.shape, lazy_tensor.data_type) | ||||||
|  |             ndarray.tofile(of.fout) | ||||||
|  |         of.fout.close() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def pick_output_type(model: LazyModel, output_type_str: Optional[str]) -> GGMLFileType: | ||||||
|  |     wq_type = model["layers.0.attention.wq.weight"].data_type | ||||||
|  |     if output_type_str == "f32" or (output_type_str is None and wq_type in (DT_F32, DT_BF16)): | ||||||
|  |         return GGMLFileType.AllF32 | ||||||
|  |     if output_type_str == "f16" or (output_type_str is None and wq_type == DT_F16): | ||||||
|  |         return GGMLFileType.MostlyF16 | ||||||
|  |     name_to_type = {name: lazy_tensor.data_type for (name, lazy_tensor) in model.items()} | ||||||
|  |     raise Exception(f"Unexpected combination of types: {name_to_type}") | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def do_necessary_conversions(model: LazyModel, params: Params) -> LazyModel: | ||||||
|  |     if "lm_head.weight" in model: | ||||||
|  |         model = convert_transformers_to_orig(model, params) | ||||||
|  |     model = filter_and_sort_tensors(model) | ||||||
|  |  | ||||||
|  |     return model | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel: | ||||||
|  |     return {name: tensor.astype(output_type.type_for_tensor(name, tensor)) | ||||||
|  |             for (name, tensor) in model.items()} | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def nth_multifile_path(path: Path, n: int) -> Optional[Path]: | ||||||
|  |     '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return | ||||||
|  |     the nth path in the model. | ||||||
|  |     ''' | ||||||
|  |     # Support the following patterns: | ||||||
|  |     patterns: List[Tuple[str, str]] = [ | ||||||
|  |         # - x.00.pth, x.01.pth, etc. | ||||||
|  |         (r'\.[0-9]{2}\.pth$', f'.{n:02}.pth'), | ||||||
|  |         # - x-00001-of-00002.bin, x-00002-of-00002.bin, etc. | ||||||
|  |         (r'-[0-9]{5}-of-(.*)$', fr'-{n:05}-of-\1'), | ||||||
|  |         # x.bin, x.bin.1, etc. | ||||||
|  |         (r'(\.[0-9]+)?$', r'\1' if n == 0 else fr'\1.{n}') | ||||||
|  |     ] | ||||||
|  |     for regex, replacement in patterns: | ||||||
|  |         if re.search(regex, path.name): | ||||||
|  |             new_path = path.with_name(re.sub(regex, replacement, path.name)) | ||||||
|  |             if new_path.exists(): | ||||||
|  |                 return new_path | ||||||
|  |     return None | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def find_multifile_paths(path: Path) -> List[Path]: | ||||||
|  |     '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return | ||||||
|  |     the whole list of paths in the model. | ||||||
|  |     ''' | ||||||
|  |     ret: List[Path] = [] | ||||||
|  |     for i in itertools.count(): | ||||||
|  |         nth_path = nth_multifile_path(path, i) | ||||||
|  |         if nth_path is None: | ||||||
|  |             break | ||||||
|  |         ret.append(nth_path) | ||||||
|  |     if not ret: | ||||||
|  |         # No matches.  This should only happen if the file was named, e.g., | ||||||
|  |         # foo.0, and there was no file named foo.  Oh well, try to process it | ||||||
|  |         # as a single file. | ||||||
|  |         return [path] | ||||||
|  |     return ret | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def load_some_model(path: Path) -> ModelPlus: | ||||||
|  |     '''Load a model of any supported format.''' | ||||||
|  |     # Be extra-friendly and accept either a file or a directory: | ||||||
|  |     if path.is_dir(): | ||||||
|  |         # Check if it's a set of safetensors files first | ||||||
|  |         files = list(path.glob("model-00001-of-*.safetensors")) | ||||||
|  |         if not files: | ||||||
|  |             # Try the PyTorch patterns too, with lower priority | ||||||
|  |             globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"] | ||||||
|  |             files = [file for glob in globs for file in path.glob(glob)] | ||||||
|  |         if not files: | ||||||
|  |             # Try GGML too, but with lower priority, since if both a non-GGML | ||||||
|  |             # model and a GGML model exist in the same directory, we assume the | ||||||
|  |             # latter was converted from the former. | ||||||
|  |             files = list(path.glob("ggml-model*.bin*")) | ||||||
|  |         if not files: | ||||||
|  |             raise Exception(f"Can't find model in directory {path}") | ||||||
|  |         if len(files) > 1: | ||||||
|  |             raise Exception(f"Found multiple models in {path}, not sure which to pick: {files}") | ||||||
|  |         path = files[0] | ||||||
|  |  | ||||||
|  |     paths = find_multifile_paths(path) | ||||||
|  |     models_plus: List[ModelPlus] = [] | ||||||
|  |     for path in paths: | ||||||
|  |         print(f"Loading model file {path}") | ||||||
|  |         models_plus.append(lazy_load_file(path)) | ||||||
|  |  | ||||||
|  |     model_plus = merge_multifile_models(models_plus) | ||||||
|  |     return model_plus | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def filter_and_sort_tensors(model: LazyModel) -> LazyModel: | ||||||
|  |     return {name: model[name] for name in TENSORS_LIST if name in model} | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def load_vocab(path: Path, vocabtype: Optional[str]) -> SentencePieceVocab: | ||||||
|  |     print(f"vocabtype: {vocabtype}") | ||||||
|  |     # Be extra-friendly and accept either a file or a directory.  Also, if it's | ||||||
|  |     # a directory, it might be the model directory, and tokenizer.model might | ||||||
|  |     # be in the parent of that. | ||||||
|  |     if path.is_dir(): | ||||||
|  |         vocab_file = "tokenizer.model" | ||||||
|  |         if vocabtype == 'bpe': | ||||||
|  |           vocab_file = "vocab.json" | ||||||
|  |         path2 = path / vocab_file | ||||||
|  |         # Use `.parent` instead of /.. to handle the symlink case better. | ||||||
|  |         path3 = path.parent / vocab_file | ||||||
|  |         if path2.exists(): | ||||||
|  |             path = path2 | ||||||
|  |         elif path3.exists(): | ||||||
|  |             path = path3 | ||||||
|  |         else: | ||||||
|  |             raise FileNotFoundError( | ||||||
|  |                 f"Could not find tokenizer.model in {path} or its parent; " | ||||||
|  |                 "if it's in another directory, pass the directory as --vocab-dir") | ||||||
|  |     added_tokens_path = path.parent / "added_tokens.json" | ||||||
|  |     print(f"Loading vocab file {path}") | ||||||
|  |     return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None, | ||||||
|  |                               vocabtype) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def default_outfile(model_paths: List[Path], file_type: GGMLFileType) -> Path: | ||||||
|  |     namestr = { | ||||||
|  |         GGMLFileType.AllF32:    "f32", | ||||||
|  |         GGMLFileType.MostlyF16: "f16", | ||||||
|  |     }[file_type] | ||||||
|  |     ret = model_paths[0].parent / f"ggml-model-{namestr}.bin" | ||||||
|  |     if ret in model_paths: | ||||||
|  |         sys.stderr.write( | ||||||
|  |             f"Error: Default output path ({ret}) would overwrite the input. " | ||||||
|  |             "Please explicitly specify a path using --outfile.\n") | ||||||
|  |         sys.exit(1) | ||||||
|  |     return ret | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def do_dump_model(model_plus: ModelPlus) -> None: | ||||||
|  |     print(f"model_plus.paths = {model_plus.paths!r}") | ||||||
|  |     print(f"model_plus.format = {model_plus.format!r}") | ||||||
|  |     print(f"model_plus.vocab = {model_plus.vocab!r}") | ||||||
|  |     for name, lazy_tensor in model_plus.model.items(): | ||||||
|  |         print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}") | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def main(args_in: Optional[List[str]] = None) -> None: | ||||||
|  |     parser = argparse.ArgumentParser(description="Convert a LLaMA model to a GGML compatible file") | ||||||
|  |     parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model") | ||||||
|  |     parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file") | ||||||
|  |     parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") | ||||||
|  |     parser.add_argument("--outtype", choices=["f32", "f16", "q4_1", "q4_0"], help="output format (default: based on input)") | ||||||
|  |     parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file") | ||||||
|  |     parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") | ||||||
|  |     parser.add_argument("model", type=Path, | ||||||
|  |                         help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)") | ||||||
|  |     parser.add_argument("--vocabtype", default='spm', choices=["spm", "bpe"], help="vocab format (default: spm)") | ||||||
|  |     args = parser.parse_args(args_in) | ||||||
|  |  | ||||||
|  |     vocab: Vocab | ||||||
|  |     if args.dump_single: | ||||||
|  |         model_plus = lazy_load_file(args.model) | ||||||
|  |         do_dump_model(model_plus) | ||||||
|  |     elif args.vocab_only: | ||||||
|  |         vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype) | ||||||
|  |         assert args.outfile, "need --outfile if using --vocab-only" | ||||||
|  |         outfile = args.outfile | ||||||
|  |         OutputFile.write_vocab_only(outfile, vocab) | ||||||
|  |         print(f"Wrote {outfile}") | ||||||
|  |     else: | ||||||
|  |         model_plus = load_some_model(args.model) | ||||||
|  |         if args.dump: | ||||||
|  |             do_dump_model(model_plus) | ||||||
|  |             return | ||||||
|  |         if model_plus.vocab is not None and args.vocab_dir is None: | ||||||
|  |             vocab = model_plus.vocab | ||||||
|  |         else: | ||||||
|  |             vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent | ||||||
|  |             vocab = load_vocab(vocab_dir, args.vocabtype) | ||||||
|  |         params = Params.load(model_plus) | ||||||
|  |         model = model_plus.model | ||||||
|  |         model = do_necessary_conversions(model, params) | ||||||
|  |         output_type = pick_output_type(model, args.outtype) | ||||||
|  |         model = convert_to_output_type(model, output_type) | ||||||
|  |         outfile = args.outfile or default_outfile(model_plus.paths, output_type) | ||||||
|  |         OutputFile.write_all(outfile, params, output_type, model, vocab) | ||||||
|  |         print(f"Wrote {outfile}") | ||||||
|  |  | ||||||
|  |  | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     main() | ||||||
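One detail worth a note: the permute() helper applied to the wq/wk tensors in convert_transformers_to_orig() reverses the row regrouping that the Hugging Face conversion script applies to the q/k projection matrices, so the rotary-embedding halves end up back in the original checkpoint order. A toy illustration of what the reshape/swapaxes does (the helper is copied here so the snippet runs standalone):

    import numpy as np

    def permute(weights, n_head):
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                       .swapaxes(1, 2)
                       .reshape(weights.shape))

    # 4 rows x 2 cols, one head: rows [0, 1, 2, 3] come out as [0, 2, 1, 3],
    # i.e. the two half-blocks of rows are interleaved pairwise.
    w = np.arange(8, dtype=np.float32).reshape(4, 2)
    print(permute(w, n_head=1))
    # [[0. 1.]
    #  [4. 5.]
    #  [2. 3.]
    #  [6. 7.]]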