diff --git a/.editorconfig b/.editorconfig index 316448c7e9..c90b171f55 100644 --- a/.editorconfig +++ b/.editorconfig @@ -49,6 +49,6 @@ charset = unset trim_trailing_whitespace = unset insert_final_newline = unset -[tools/mtmd/miniaudio.h] +[vendor/miniaudio/miniaudio.h] trim_trailing_whitespace = unset insert_final_newline = unset diff --git a/.github/workflows/build-linux-cross.yml b/.github/workflows/build-linux-cross.yml index dbd31e589b..92dc41f9d7 100644 --- a/.github/workflows/build-linux-cross.yml +++ b/.github/workflows/build-linux-cross.yml @@ -26,12 +26,12 @@ jobs: sudo apt-get install -y --no-install-recommends \ build-essential \ gcc-14-riscv64-linux-gnu \ - g++-14-riscv64-linux-gnu \ - libcurl4-openssl-dev:riscv64 + g++-14-riscv64-linux-gnu - name: Build run: | - cmake -B build -DCMAKE_BUILD_TYPE=Release \ + cmake -B build -DLLAMA_CURL=OFF \ + -DCMAKE_BUILD_TYPE=Release \ -DGGML_OPENMP=OFF \ -DLLAMA_BUILD_EXAMPLES=ON \ -DLLAMA_BUILD_TOOLS=ON \ @@ -72,12 +72,12 @@ jobs: glslc \ gcc-14-riscv64-linux-gnu \ g++-14-riscv64-linux-gnu \ - libvulkan-dev:riscv64 \ - libcurl4-openssl-dev:riscv64 + libvulkan-dev:riscv64 - name: Build run: | - cmake -B build -DCMAKE_BUILD_TYPE=Release \ + cmake -B build -DLLAMA_CURL=OFF \ + -DCMAKE_BUILD_TYPE=Release \ -DGGML_VULKAN=ON \ -DGGML_OPENMP=OFF \ -DLLAMA_BUILD_EXAMPLES=ON \ @@ -118,12 +118,12 @@ jobs: build-essential \ glslc \ crossbuild-essential-arm64 \ - libvulkan-dev:arm64 \ - libcurl4-openssl-dev:arm64 + libvulkan-dev:arm64 - name: Build run: | - cmake -B build -DCMAKE_BUILD_TYPE=Release \ + cmake -B build -DLLAMA_CURL=OFF \ + -DCMAKE_BUILD_TYPE=Release \ -DGGML_VULKAN=ON \ -DGGML_OPENMP=OFF \ -DLLAMA_BUILD_EXAMPLES=ON \ @@ -163,12 +163,12 @@ jobs: sudo apt-get install -y --no-install-recommends \ build-essential \ gcc-14-powerpc64le-linux-gnu \ - g++-14-powerpc64le-linux-gnu \ - libcurl4-openssl-dev:ppc64el + g++-14-powerpc64le-linux-gnu - name: Build run: | - cmake -B build -DCMAKE_BUILD_TYPE=Release \ + cmake -B build -DLLAMA_CURL=OFF \ + -DCMAKE_BUILD_TYPE=Release \ -DGGML_OPENMP=OFF \ -DLLAMA_BUILD_EXAMPLES=ON \ -DLLAMA_BUILD_TOOLS=ON \ @@ -209,12 +209,12 @@ jobs: glslc \ gcc-14-powerpc64le-linux-gnu \ g++-14-powerpc64le-linux-gnu \ - libvulkan-dev:ppc64el \ - libcurl4-openssl-dev:ppc64el + libvulkan-dev:ppc64el - name: Build run: | - cmake -B build -DCMAKE_BUILD_TYPE=Release \ + cmake -B build -DLLAMA_CURL=OFF \ + -DCMAKE_BUILD_TYPE=Release \ -DGGML_VULKAN=ON \ -DGGML_OPENMP=OFF \ -DLLAMA_BUILD_EXAMPLES=ON \ diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index dac4cc770e..564af1448f 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -58,23 +58,20 @@ add_library(${TARGET} STATIC arg.cpp arg.h base64.hpp - chat.cpp - chat.h chat-parser.cpp chat-parser.h + chat.cpp + chat.h common.cpp common.h console.cpp console.h - json-schema-to-grammar.cpp - json.hpp - json-partial.h json-partial.cpp + json-partial.h + json-schema-to-grammar.cpp llguidance.cpp log.cpp log.h - minja/chat-template.hpp - minja/minja.hpp ngram-cache.cpp ngram-cache.h regex-partial.cpp @@ -147,7 +144,7 @@ if (LLAMA_LLGUIDANCE) set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} llguidance ${LLGUIDANCE_PLATFORM_LIBS}) endif () -target_include_directories(${TARGET} PUBLIC .) +target_include_directories(${TARGET} PUBLIC . 
../vendor)
 target_compile_features   (${TARGET} PUBLIC cxx_std_17)
 target_link_libraries     (${TARGET} PRIVATE ${LLAMA_COMMON_EXTRA_LIBS} PUBLIC llama Threads::Threads)
diff --git a/common/arg.cpp b/common/arg.cpp
index 8371774f48..0cca464544 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -1,10 +1,11 @@
-#include "gguf.h" // for reading GGUF splits
 #include "arg.h"

+#include "chat.h"
 #include "common.h"
+#include "gguf.h" // for reading GGUF splits
+#include "json-schema-to-grammar.h"
 #include "log.h"
 #include "sampling.h"
-#include "chat.h"

 // fix problem with std::min and std::max
 #if defined(_WIN32)
@@ -15,6 +16,9 @@
 #include <windows.h>
 #endif

+#define JSON_ASSERT GGML_ASSERT
+#include <nlohmann/json.hpp>
+
 #include
 #include
 #include
@@ -34,8 +38,6 @@
 #include
 #endif

-#include "json-schema-to-grammar.h"
-
 using json = nlohmann::ordered_json;

 std::initializer_list<enum llama_example> mmproj_examples = {
diff --git a/common/chat-parser.cpp b/common/chat-parser.cpp
index c314b8b519..65b664cb37 100644
--- a/common/chat-parser.cpp
+++ b/common/chat-parser.cpp
@@ -154,9 +154,10 @@ bool common_chat_msg_parser::try_parse_reasoning(const std::string & start_think
             if (!rest.empty()) {
                 handle_reasoning(rest, /* closed */ !is_partial());
             }
-            if (!syntax_.thinking_forced_open) {
-                throw common_chat_msg_partial_exception(end_think);
-            }
+            // Allow unclosed thinking tags, for now (https://github.com/ggml-org/llama.cpp/issues/13812, https://github.com/ggml-org/llama.cpp/issues/13877)
+            // if (!syntax_.thinking_forced_open) {
+            //     throw common_chat_msg_partial_exception(end_think);
+            // }
             return true;
         }
     }
diff --git a/common/chat-parser.h b/common/chat-parser.h
index 5d53f2df1d..7ee355056b 100644
--- a/common/chat-parser.h
+++ b/common/chat-parser.h
@@ -2,9 +2,10 @@

 #include "chat.h"
 #include "json-partial.h"
-#include "json.hpp"
 #include "regex-partial.h"

+#include <nlohmann/json.hpp>
+
 #include <optional>
 #include <string>
 #include <vector>
diff --git a/common/chat.cpp b/common/chat.cpp
index 7584639b07..f1ab4c85a9 100644
--- a/common/chat.cpp
+++ b/common/chat.cpp
@@ -1,13 +1,14 @@
 #include "chat.h"
 #include "chat-parser.h"
 #include "common.h"
+#include "json-partial.h"
 #include "json-schema-to-grammar.h"
 #include "log.h"
-#include "json-partial.h"
-#include "minja/chat-template.hpp"
-#include "minja/minja.hpp"
 #include "regex-partial.h"

+#include <minja/chat-template.hpp>
+#include <minja/minja.hpp>
+
 #include
 #include
 #include
@@ -16,7 +17,6 @@
 #include
 #include

-
 static std::string format_time(const std::chrono::system_clock::time_point & now, const std::string & format) {
     auto time = std::chrono::system_clock::to_time_t(now);
     auto local_time = *std::localtime(&time);
diff --git a/common/common.cpp b/common/common.cpp
index 2afa9b2d64..d80c47bc39 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -903,13 +903,16 @@ struct common_init_result common_init_from_params(common_params & params) {
         ok = false;
     }

-    if (llama_vocab_eos(vocab) == LLAMA_TOKEN_NULL) {
-        LOG_WRN("%s: warning: vocab does not have an EOS token, reranking will not work\n", __func__);
-        ok = false;
-    }
+    bool has_eos = llama_vocab_eos(vocab) != LLAMA_TOKEN_NULL;
+    bool has_sep = llama_vocab_sep(vocab) != LLAMA_TOKEN_NULL;

-    if (llama_vocab_sep(vocab) == LLAMA_TOKEN_NULL) {
-        LOG_WRN("%s: warning: vocab does not have a SEP token, reranking will not work\n", __func__);
+    if (!has_eos && !has_sep) {
+        LOG_WRN("%s: warning: vocab does not have an EOS token or SEP token, reranking will not work\n", __func__);
+        ok = false;
+    } else if (!has_eos) {
+        LOG_WRN("%s: warning: vocab does not have an EOS token, using SEP token as fallback\n", __func__);
+    } else if (!has_sep) {
+        LOG_WRN("%s: warning: vocab does not have a SEP token, reranking will not work\n", __func__);
         ok = false;
     }
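A side note on the include churn across the `common/` files in this patch: `json.hpp` and the `minja` headers now live under `vendor/`, which `common` picks up through the extra `../vendor` include path added to `common/CMakeLists.txt` above, so they are included with angle brackets rather than local quotes. A minimal, self-contained sketch of the resulting pattern (using a plain `assert` where the real code routes `JSON_ASSERT` to `GGML_ASSERT`):

```cpp
#include <cassert>

// Must be defined before the first include of the vendored header;
// arg.cpp does the same thing with GGML_ASSERT.
#define JSON_ASSERT assert
#include <nlohmann/json.hpp> // resolved via the new ../vendor include directory

using json = nlohmann::ordered_json; // order-preserving alias used across common/

int main() {
    json j = { {"model", "gguf"}, {"n_ctx", 4096} };
    assert(j.dump() == R"({"model":"gguf","n_ctx":4096})"); // insertion order preserved
    return 0;
}
```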
+ LOG_WRN("%s: warning: vocab does not have a SEP token, reranking will not work\n", __func__); ok = false; } diff --git a/common/json-partial.cpp b/common/json-partial.cpp index 7591a8e4cf..d9d9169989 100644 --- a/common/json-partial.cpp +++ b/common/json-partial.cpp @@ -1,9 +1,10 @@ -#include -#include "ggml.h" -#include "log.h" -#include +#include "json-partial.h" -#include +#include "log.h" + +#include + +#include using json = nlohmann::ordered_json; diff --git a/common/json-partial.h b/common/json-partial.h index 854db6a3ae..f63356dc48 100644 --- a/common/json-partial.h +++ b/common/json-partial.h @@ -1,5 +1,6 @@ #pragma once -#include + +#include // Healing marker (empty if the JSON was fully parsed / wasn't healed). struct common_healing_marker { diff --git a/common/json-schema-to-grammar.cpp b/common/json-schema-to-grammar.cpp index 5b3059c2f7..d38a74f95c 100644 --- a/common/json-schema-to-grammar.cpp +++ b/common/json-schema-to-grammar.cpp @@ -1,8 +1,9 @@ #include "json-schema-to-grammar.h" #include "common.h" +#include + #include -#include #include #include #include diff --git a/common/json-schema-to-grammar.h b/common/json-schema-to-grammar.h index 4613f5d9f9..362991b542 100644 --- a/common/json-schema-to-grammar.h +++ b/common/json-schema-to-grammar.h @@ -1,9 +1,9 @@ #pragma once -#include "ggml.h" -// Change JSON_ASSERT from assert() to GGML_ASSERT: -#define JSON_ASSERT GGML_ASSERT -#include "json.hpp" +#include + +#include +#include std::string json_schema_to_grammar(const nlohmann::ordered_json & schema, bool force_gbnf = false); diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index a015ecee08..ab0f0e0ea0 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -423,19 +423,19 @@ class ModelBase: try: # for security reason, we don't allow loading remote code by default # if a model need remote code, we will fallback to config.json - return AutoConfig.from_pretrained(dir_model, trust_remote_code=False).to_dict() + config = AutoConfig.from_pretrained(dir_model, trust_remote_code=False).to_dict() except Exception as e: logger.warning(f"Failed to load model config from {dir_model}: {e}") logger.warning("Trying to load config.json instead") with open(dir_model / "config.json", "r", encoding="utf-8") as f: config = json.load(f) - if "llm_config" in config: - # rename for InternVL - config["text_config"] = config["llm_config"] - if "thinker_config" in config: - # rename for Qwen2.5-Omni - config["text_config"] = config["thinker_config"]["text_config"] - return config + if "llm_config" in config: + # rename for InternVL + config["text_config"] = config["llm_config"] + if "thinker_config" in config: + # rename for Qwen2.5-Omni + config["text_config"] = config["thinker_config"]["text_config"] + return config @classmethod def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]: @@ -523,15 +523,15 @@ class TextModel(ModelBase): self.gguf_writer.add_context_length(n_ctx) logger.info(f"gguf: context length = {n_ctx}") - if (n_embd := self.find_hparam(["hidden_size", "n_embd"], optional=True)) is not None: + if (n_embd := self.find_hparam(["hidden_size", "n_embd", "dim"], optional=True)) is not None: self.gguf_writer.add_embedding_length(n_embd) logger.info(f"gguf: embedding length = {n_embd}") - if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None: + if (n_ff := self.find_hparam(["intermediate_size", "n_inner", "hidden_dim"], optional=True)) is not None: self.gguf_writer.add_feed_forward_length(n_ff) logger.info(f"gguf: 
feed forward length = {n_ff}") - if (n_head := self.find_hparam(["num_attention_heads", "n_head"], optional=True)) is not None: + if (n_head := self.find_hparam(["num_attention_heads", "n_head", "n_heads"], optional=True)) is not None: self.gguf_writer.add_head_count(n_head) logger.info(f"gguf: head count = {n_head}") @@ -674,12 +674,12 @@ class TextModel(ModelBase): if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed": # ref: https://huggingface.co/tiiuae/falcon-7b res = "falcon" - if chkhsh == "9d032fcbd5501f4a38150912590928bfb36091efb5df11b8e2124b0390e3fb1e": - # ref: https://huggingface.co/tiiuae/Falcon3-7B-Base - res = "falcon3" if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f": # ref: https://huggingface.co/BAAI/bge-small-en-v1.5 res = "bert-bge" + if chkhsh == "9d032fcbd5501f4a38150912590928bfb36091efb5df11b8e2124b0390e3fb1e": + # ref: https://huggingface.co/tiiuae/Falcon3-7B-Base + res = "falcon3" if chkhsh == "8e62295832751ca1e8f92f2226f403dea30dc5165e448b5bfa05af5340c64ec7": # ref: https://huggingface.co/BAAI/bge-large-zh-v1.5 res = "bert-bge-large" @@ -731,9 +731,6 @@ class TextModel(ModelBase): if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a": # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code res = "jina-v2-code" - if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b" or chkhsh == "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516": - # ref: https://huggingface.co/THUDM/glm-4-9b-chat - res = "chatglm-bpe" if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee": # ref: https://huggingface.co/LumiOpen/Viking-7B res = "viking" @@ -764,9 +761,6 @@ class TextModel(ModelBase): if chkhsh == "60824e3c0d9401f89943cbb2fff727f0e2d4c545ba4df2d6e4f09a6db0f5b450": # ref: https://huggingface.co/facebook/chameleon-7b res = "chameleon" - if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35": - # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0 - res = "minerva-7b" if chkhsh == "8b5a93ed704057481f240da0be7e7dca721d7f8f4755263b6807227a2cbeae65": # ref: https://huggingface.co/sentence-transformers/stsb-roberta-base res = "roberta-bpe" @@ -797,15 +791,24 @@ class TextModel(ModelBase): if chkhsh == "d353350c764d8c3b39c763113960e4fb4919bea5fbf208a0e3b22e8469dc7406": # ref: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct res = "llama4" - if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2": - # ref: https://huggingface.co/THUDM/glm-4-9b-hf - res = "glm4" if chkhsh == "0e9433cbbb161f89e264eb32e8e64bfe69e834973ffca5d41d3948a604a3e2a3": # ref: https://huggingface.co/mistral-community/pixtral-12b res = "pixtral" if chkhsh == "d5f1dd6f980fec569fb218a81a7658ac45fc56b38c5a0adeb1c232fbe04ef5ec": # ref: https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base res = "seed-coder" + if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b": + # ref: https://huggingface.co/THUDM/glm-4-9b-chat + res = "chatglm-bpe" + if chkhsh == "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516": + # ref: https://huggingface.co/THUDM/glm-4-9b-chat + res = "chatglm-bpe" + if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2": + # ref: https://huggingface.co/THUDM/glm-4-9b-hf + res = "glm4" + if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35": + # ref: 
https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0 + res = "minerva-7b" if res is None: logger.warning("\n") @@ -1044,6 +1047,10 @@ class TextModel(ModelBase): special_vocab.chat_template = "rwkv-world" # hack: Add '\n\n' as the EOT token to make it chat normally special_vocab._set_special_token("eot", 261) + # hack: Override these as they have already been set (incorrectly) + special_vocab.special_token_ids["bos"] = 0 + special_vocab.special_token_ids["eos"] = 0 + special_vocab.add_to_gguf(self.gguf_writer) def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int): @@ -1207,7 +1214,7 @@ class MmprojModel(ModelBase): self.gguf_writer.add_audio_block_count(self.find_aparam(self.n_block_keys)) self.gguf_writer.add_audio_head_count(self.find_aparam(["num_attention_heads"])) - else: + if not self.has_vision_encoder and not self.has_audio_encoder: raise ValueError("MmprojModel must have either vision or audio encoder") def write_vocab(self): @@ -1841,7 +1848,8 @@ class StableLMModel(TextModel): "MistralForCausalLM", "MixtralForCausalLM", "VLlama3ForCausalLM", - "LlavaForConditionalGeneration") + "LlavaForConditionalGeneration", + "LlamaModel") class LlamaModel(TextModel): model_arch = gguf.MODEL_ARCH.LLAMA undo_permute = True @@ -1921,6 +1929,8 @@ class LlamaModel(TextModel): if is_vision_tensor: return [] # skip vision tensors + elif self.hf_arch == "LlamaModel": + name = "model." + name elif name.startswith("model.text_model"): name = name.replace("text_model.", "") # for SmolVLM elif name.startswith("language_model."): @@ -2169,6 +2179,9 @@ class Llama4VisionModel(MmprojModel): # process vision tensors if "positional_embedding_vlm" in name and ".weight" not in name: name += ".weight" + if "multi_modal_projector.linear_1" in name: + # despite the name with number postfix, this is a single fully connected layer + return [(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_MMPROJ_FC], data_torch)] return [(self.map_tensor_name(name), data_torch)] return [] @@ -3676,7 +3689,7 @@ class InternLM3Model(TextModel): return [(self.map_tensor_name(name), data_torch)] -@ModelBase.register("BertModel", "BertForMaskedLM", "CamembertModel") +@ModelBase.register("BertModel", "BertForMaskedLM", "CamembertModel", "BertForSequenceClassification") class BertModel(TextModel): model_arch = gguf.MODEL_ARCH.BERT @@ -3684,11 +3697,21 @@ class BertModel(TextModel): super().__init__(*args, **kwargs) self.vocab_size = None + if cls_out_labels := self.hparams.get("id2label"): + if len(cls_out_labels) == 2 and cls_out_labels[0] == "LABEL_0": + # Remove dummy labels added by AutoConfig + cls_out_labels = None + self.cls_out_labels = cls_out_labels + def set_gguf_parameters(self): super().set_gguf_parameters() self.gguf_writer.add_causal_attention(False) self._try_set_pooling_type() + if self.cls_out_labels: + key_name = gguf.Keys.Classifier.OUTPUT_LABELS.format(arch = gguf.MODEL_ARCH_NAMES[self.model_arch]) + self.gguf_writer.add_array(key_name, [v for k, v in sorted(self.cls_out_labels.items())]) + def set_vocab(self): tokens, toktypes, tokpre = self.get_vocab_base() self.vocab_size = len(tokens) @@ -3739,6 +3762,14 @@ class BertModel(TextModel): if name.startswith("cls.seq_relationship"): return [] + if self.cls_out_labels: + # For BertForSequenceClassification (direct projection layer) + if name == "classifier.weight": + name = "classifier.out_proj.weight" + + if name == "classifier.bias": + name = "classifier.out_proj.bias" + return [(self.map_tensor_name(name), data_torch)] def 
_xlmroberta_tokenizer_init(self) -> None: @@ -3758,44 +3789,93 @@ class BertModel(TextModel): from sentencepiece import sentencepiece_model_pb2 as model tokenizer_path = self.dir_model / 'sentencepiece.bpe.model' + + tokenizer_json = {} + tokenizer_config_json = {} if not tokenizer_path.is_file(): - raise FileNotFoundError(f"File not found: {tokenizer_path}") + tokenizer_path = self.dir_model / 'tokenizer.json' + tokenizer_config_path = self.dir_model / 'tokenizer_config.json' - sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue] - sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read()) - assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM + if not tokenizer_path.is_file(): + raise FileNotFoundError(f"File not found: {tokenizer_path}") - add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix - remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces - precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap + from base64 import b64decode + from transformers import AutoTokenizer + tokenizer = AutoTokenizer.from_pretrained(self.dir_model) - tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(str(tokenizer_path)) + with open(tokenizer_path, "r", encoding="utf-8") as fp: + tokenizer_json = json.load(fp) - vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) + if tokenizer_config_path.is_file(): + with open(tokenizer_config_path, "r", encoding="utf-8") as fp: + tokenizer_config_json = json.load(fp) + + add_prefix = tokenizer.add_prefix_space + remove_whitespaces = tokenizer.clean_up_tokenization_spaces + precompiled_charsmap = b64decode(tokenizer_json["normalizer"]["precompiled_charsmap"]) + + vocab_size = self.hparams.get("vocab_size", tokenizer.vocab_size) + else: + sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue] + sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read()) + assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM + + add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix + remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces + precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap + + tokenizer = SentencePieceProcessor() + tokenizer.LoadFromFile(str(tokenizer_path)) + + vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)] scores: list[float] = [-10000.0] * vocab_size toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size - for token_id in range(tokenizer.vocab_size()): - piece = tokenizer.IdToPiece(token_id) - text = piece.encode("utf-8") - score = tokenizer.GetScore(token_id) + if isinstance(tokenizer, SentencePieceProcessor): + for token_id in range(tokenizer.vocab_size()): + piece = tokenizer.IdToPiece(token_id) + text = piece.encode("utf-8") + score = tokenizer.GetScore(token_id) - toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.IsUnknown(token_id): - toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.IsControl(token_id): - toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.IsUnused(token_id): - toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.IsByte(token_id): - toktype = SentencePieceTokenTypes.BYTE + toktype = SentencePieceTokenTypes.NORMAL + if tokenizer.IsUnknown(token_id): + toktype = SentencePieceTokenTypes.UNKNOWN + elif tokenizer.IsControl(token_id): + 
toktype = SentencePieceTokenTypes.CONTROL + elif tokenizer.IsUnused(token_id): + toktype = SentencePieceTokenTypes.UNUSED + elif tokenizer.IsByte(token_id): + toktype = SentencePieceTokenTypes.BYTE - tokens[token_id] = text - scores[token_id] = score - toktypes[token_id] = toktype + tokens[token_id] = text + scores[token_id] = score + toktypes[token_id] = toktype + else: + added_vocab = tokenizer.get_added_vocab() + unk_token = tokenizer_config_json.get("unk_token") + unk_token_id = added_vocab.get(unk_token, tokenizer_json["model"].get("unk_id", 3)) + + for token_id in range(vocab_size): + piece = tokenizer._convert_id_to_token(token_id) + text = piece.encode("utf-8") + score = tokenizer_json["model"]["vocab"][token_id][1] + + toktype = SentencePieceTokenTypes.NORMAL + if token_id == unk_token_id: + toktype = SentencePieceTokenTypes.UNKNOWN + elif token_id in tokenizer.all_special_ids: + toktype = SentencePieceTokenTypes.CONTROL + elif token_id in added_vocab.values(): + toktype = SentencePieceTokenTypes.USER_DEFINED + # No reliable way to detect this, but jina doesn't have any + # elif tokenizer.IsByte(token_id): + # toktype = SentencePieceTokenTypes.BYTE + + tokens[token_id] = text + scores[token_id] = score + toktypes[token_id] = toktype if vocab_size > len(tokens): pad_count = vocab_size - len(tokens) @@ -3805,15 +3885,16 @@ class BertModel(TextModel): scores.append(-1000.0) toktypes.append(SentencePieceTokenTypes.UNUSED) - # realign tokens (see HF tokenizer code) - tokens = [b'', b'', b'', b''] + tokens[3:-1] - scores = [0.0, 0.0, 0.0, 0.0] + scores[3:-1] - toktypes = [ - SentencePieceTokenTypes.CONTROL, - SentencePieceTokenTypes.CONTROL, - SentencePieceTokenTypes.CONTROL, - SentencePieceTokenTypes.UNKNOWN, - ] + toktypes[3:-1] + if isinstance(tokenizer, SentencePieceProcessor): + # realign tokens (see HF tokenizer code) + tokens = [b'', b'', b'', b''] + tokens[3:-1] + scores = [0.0, 0.0, 0.0, 0.0] + scores[3:-1] + toktypes = [ + SentencePieceTokenTypes.CONTROL, + SentencePieceTokenTypes.CONTROL, + SentencePieceTokenTypes.CONTROL, + SentencePieceTokenTypes.UNKNOWN, + ] + toktypes[3:-1] self.gguf_writer.add_tokenizer_model("t5") self.gguf_writer.add_tokenizer_pre("default") @@ -3833,7 +3914,27 @@ class BertModel(TextModel): self.gguf_writer.add_add_eos_token(True) -@ModelBase.register("RobertaModel") +@ModelBase.register("DistilBertModel", "DistilBertForMaskedLM", "DistilBertForSequenceClassification") +class DistilBertModel(BertModel): + model_arch = gguf.MODEL_ARCH.BERT + + def set_gguf_parameters(self): + self.gguf_writer.add_layer_norm_eps(1e-12) + logger.info("gguf: layer norm epsilon = 1e-12") + super().set_gguf_parameters() + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + if name.startswith("distilbert."): + name = name[11:] + + # These layers act as MLM head, so we don't need them + if name.startswith("vocab_"): + return [] + + return super().modify_tensors(data_torch, name, bid) + + +@ModelBase.register("RobertaModel", "RobertaForSequenceClassification") class RobertaModel(BertModel): model_arch = gguf.MODEL_ARCH.BERT diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py index 5993a4c983..2f733f0973 100755 --- a/convert_hf_to_gguf_update.py +++ b/convert_hf_to_gguf_update.py @@ -1,28 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -# This script downloads the tokenizer models of the specified models from Huggingface and -# generates the get_vocab_base_pre() function for 
convert_hf_to_gguf.py
-#
-# This is necessary in order to analyze the type of pre-tokenizer used by the model and
-# provide the necessary information to llama.cpp via the GGUF header in order to implement
-# the same pre-tokenizer.
-#
-# ref: https://github.com/ggml-org/llama.cpp/pull/6920
-#
-# Instructions:
-#
-# - Add a new model to the "models" list
-# - Run the script with your huggingface token:
-#
-#   python3 convert_hf_to_gguf_update.py <huggingface_token>
-#
-# - The convert_hf_to_gguf.py script will have had its get_vocab_base_pre() function updated
-# - Update llama.cpp with the new pre-tokenizer if necessary
-#
-# TODO: generate tokenizer tests for llama.cpp
-#
-
 import logging
 import os
 import pathlib
@@ -32,6 +10,7 @@ import requests
 import sys
 import json
 import shutil
+import argparse

 from hashlib import sha256
 from enum import IntEnum, auto
@@ -41,6 +20,11 @@ logging.basicConfig(level=logging.DEBUG)
 logger = logging.getLogger("convert_hf_to_gguf_update")
 sess = requests.Session()

+convert_py_pth = pathlib.Path("convert_hf_to_gguf.py")
+convert_py = convert_py_pth.read_text(encoding="utf-8")
+hf_token_pth = pathlib.Path.home() / ".cache" / "huggingface" / "token"
+hf_token = hf_token_pth.read_text(encoding="utf-8").strip() if hf_token_pth.exists() else None
+

 class TOKENIZER_TYPE(IntEnum):
     SPM = auto()
@@ -49,20 +33,49 @@ class TOKENIZER_TYPE(IntEnum):
     UGM = auto()


+DOC_STRING = """
+This script downloads the tokenizer models of the specified models from Huggingface and
+generates the get_vocab_base_pre() function for convert_hf_to_gguf.py
+
+/!\\ It is intended to be used by contributors and is not meant to be run by end users
+
+This is necessary in order to analyze the type of pre-tokenizer used by the model and
+provide the necessary information to llama.cpp via the GGUF header in order to implement
+the same pre-tokenizer.
+
+ref: https://github.com/ggml-org/llama.cpp/pull/6920
+
+Instructions:
+
+- Add a new model to the "models" list
+- Run the script with your huggingface token
+  By default, token will be read from ~/.cache/huggingface/token
+- The convert_hf_to_gguf.py script will have had its get_vocab_base_pre() function updated
+- Update llama.cpp with the new pre-tokenizer if necessary
+"""
+# TODO: generate tokenizer tests for llama.cpp
+
+parser = argparse.ArgumentParser(description=DOC_STRING, formatter_class=argparse.RawTextHelpFormatter)
+parser.add_argument(
+    "--full", action="store_true",
+    help="download full list of models - make sure you have access to all of them",
+)
+parser.add_argument(
+    "hf_token",
+    help="optional HF token",
+    nargs="?",
+)
+args = parser.parse_args()
+hf_token = args.hf_token if args.hf_token is not None else hf_token
+
+if hf_token is None:
+    logger.error("HF token is required. Please provide it as an argument or set it in ~/.cache/huggingface/token")
+    sys.exit(1)
+
 # TODO: this string has to exercise as much pre-tokenizer functionality as possible
 #       will be updated with time - contributions welcome
 CHK_TXT = '\n \n\n \n\n\n \t \t\t \t\n  \n   \n    \n     \n🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'

-if len(sys.argv) == 2:
-    token = sys.argv[1]
-    if not token.startswith("hf_"):
-        logger.info("Huggingface token seems invalid")
-        logger.info("Usage: python convert_hf_to_gguf_update.py <huggingface_token>")
-        sys.exit(1)
-else:
-    logger.info("Usage: python convert_hf_to_gguf_update.py <huggingface_token>")
-    sys.exit(1)
-
 # TODO: add models here, base models preferred
 models = [
     {"name": "llama-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-llama/Llama-2-7b-hf", },
@@ -103,7 +116,6 @@ models = [
     {"name": "exaone", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct", },
     {"name": "phi-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/microsoft/phi-2", },
     {"name": "chameleon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/facebook/chameleon-7b", },
-    {"name": "minerva-7b", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0", },
     {"name": "roberta-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sentence-transformers/stsb-roberta-base"},
     {"name": "gigachat", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct"},
     {"name": "megrez", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Infinigence/Megrez-3B-Instruct"},
@@ -114,11 +126,19 @@ models = [
     {"name": "trillion", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/trillionlabs/Trillion-7B-preview", },
     {"name": "bailingmoe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/inclusionAI/Ling-lite", },
     {"name": "llama4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct", },
-    {"name": "glm4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-hf", },
     {"name": "pixtral", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistral-community/pixtral-12b", },
     {"name": "seed-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base", },
 ]

+# some models are known to be broken upstream, so we will skip them as exceptions
+pre_computed_hashes = [
+    # chatglm-bpe has 2 hashes, why?
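+    # (likely because the upstream repo regenerated tokenizer.json at some point, so files from both revisions are still in circulation)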
+ {"name": "chatglm-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-chat", "chkhsh": "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b"}, + {"name": "chatglm-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-chat", "chkhsh": "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516"}, + {"name": "glm4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-hf", "chkhsh": "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2"}, + {"name": "minerva-7b", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0", "chkhsh": "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35"}, +] + def download_file_with_auth(url, token, save_path): headers = {"Authorization": f"Bearer {token}"} @@ -169,9 +189,29 @@ def download_model(model): if os.path.isfile(save_path): logger.info(f"{name}: File {save_path} already exists - skipping") continue - download_file_with_auth(f"{repo}/resolve/main/{file}", token, save_path) + download_file_with_auth(f"{repo}/resolve/main/{file}", hf_token, save_path) +# get list of existing models and chkhsh from the convert_hf_to_gguf.py file +# returns mapping res --> chkhsh +def get_existing_models(convert_py): + pattern = r'if chkhsh == "([a-f0-9]{64})":\s*\n\s*.*\s*res = "([^"]+)"' + matches = re.findall(pattern, convert_py) + output = {} + for chkhsh, res in matches: + output[res] = chkhsh + return output + + +existing_models = {} +all_models = models.copy() +if not args.full: + # Filter out models that already exist in convert_hf_to_gguf.py + existing_models = get_existing_models(convert_py) + all_models = models.copy() + models = [model for model in all_models if model["name"] not in existing_models] + +logging.info(f"Downloading {len(models)} models...") for model in models: try: download_model(model) @@ -182,9 +222,10 @@ for model in models: # generate the source code for the convert_hf_to_gguf.py:get_vocab_base_pre() function: src_ifs = "" -for model in models: +for model in [*all_models, *pre_computed_hashes]: name = model["name"] tokt = model["tokt"] + chkhsh = model.get("chkhsh") if tokt == TOKENIZER_TYPE.SPM or tokt == TOKENIZER_TYPE.UGM: continue @@ -195,35 +236,44 @@ for model in models: continue # create the tokenizer - try: - if name == "t5": - tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}", use_fast=False) - else: - tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}") - except OSError as e: - logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. Error: {e}") - continue # Skip to the next model if the tokenizer can't be loaded + if chkhsh is not None: + # if the model has a pre-computed hash, use it + logger.info(f"Using pre-computed hash for model {name}: {chkhsh}") + elif name in existing_models: + # if the model already exists in convert_hf_to_gguf.py, skip compute hash + chkhsh = existing_models[name] + else: + # otherwise, compute the hash of the tokenizer + try: + logger.info(f"Loading tokenizer from {f'models/tokenizers/{name}'}...") + if name == "t5": + tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}", use_fast=False) + else: + tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}") + except OSError as e: + logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. 
Error: {e}") + continue # Skip to the next model if the tokenizer can't be loaded - chktok = tokenizer.encode(CHK_TXT) - chkhsh = sha256(str(chktok).encode()).hexdigest() + chktok = tokenizer.encode(CHK_TXT) + chkhsh = sha256(str(chktok).encode()).hexdigest() - logger.info(f"model: {name}") - logger.info(f"tokt: {tokt}") - logger.info(f"repo: {model['repo']}") - logger.info(f"chktok: {chktok}") - logger.info(f"chkhsh: {chkhsh}") + logger.info(f"model: {name}") + logger.info(f"tokt: {tokt}") + logger.info(f"repo: {model['repo']}") + logger.info(f"chktok: {chktok}") + logger.info(f"chkhsh: {chkhsh}") - # print the "pre_tokenizer" content from the tokenizer.json - with open(f"models/tokenizers/{name}/tokenizer.json", "r", encoding="utf-8") as f: - cfg = json.load(f) - normalizer = cfg["normalizer"] - logger.info("normalizer: " + json.dumps(normalizer, indent=4)) - pre_tokenizer = cfg["pre_tokenizer"] - logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4)) - if "ignore_merges" in cfg["model"]: - logger.info("ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4)) + # print the "pre_tokenizer" content from the tokenizer.json + with open(f"models/tokenizers/{name}/tokenizer.json", "r", encoding="utf-8") as f: + cfg = json.load(f) + normalizer = cfg["normalizer"] + logger.info("normalizer: " + json.dumps(normalizer, indent=4)) + pre_tokenizer = cfg["pre_tokenizer"] + logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4)) + if "ignore_merges" in cfg["model"]: + logger.info("ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4)) - logger.info("") + logger.info("") src_ifs += f" if chkhsh == \"{chkhsh}\":\n" src_ifs += f" # ref: {model['repo']}\n" @@ -271,8 +321,6 @@ src_func = f""" return res """ -convert_py_pth = pathlib.Path("convert_hf_to_gguf.py") -convert_py = convert_py_pth.read_text(encoding="utf-8") convert_py = re.sub( r"(# Marker: Start get_vocab_base_pre)(.+?)( +# Marker: End get_vocab_base_pre)", lambda m: m.group(1) + src_func + m.group(3), @@ -288,7 +336,7 @@ logger.info("+++ convert_hf_to_gguf.py was updated") tests = [ "ied 4 ½ months", - "Führer", + "Äpfel", "", " ", " ", @@ -367,6 +415,10 @@ for model in models: logger.error(f"Failed to load tokenizer for model {name}. Error: {e}") continue # Skip this model and continue with the next one in the loop + if not os.path.exists(f"models/ggml-vocab-{name}.gguf"): + logger.info(f"Skip vocab files for model {name}, no GGUF file found") + continue + with open(f"models/ggml-vocab-{name}.gguf.inp", "w", encoding="utf-8") as f: for text in tests: f.write(f"{text}") diff --git a/docs/build.md b/docs/build.md index c9027c0b58..32717a793f 100644 --- a/docs/build.md +++ b/docs/build.md @@ -63,6 +63,7 @@ cmake --build build --config Release cmake --preset x64-windows-llvm-release cmake --build build-x64-windows-llvm-release ``` +- Curl usage is enabled by default and can be turned off with `-DLLAMA_CURL=OFF`. Otherwise you need to install development libraries for libcurl. ## BLAS Build diff --git a/examples/parallel/README.md b/examples/parallel/README.md index ece3a66416..2468a30d22 100644 --- a/examples/parallel/README.md +++ b/examples/parallel/README.md @@ -4,7 +4,7 @@ Simplified simulation of serving incoming requests in parallel ## Example -Generate 128 client requests (`-ns 128`), simulating 8 concurrent clients (`-np 8`). The system prompt is shared (`-pps`), meaning that it is computed once at the start. 
The client requests consist of 10 junk questions (`-j 10`) followed by the actual question. +Generate 128 client requests (`-ns 128`), simulating 8 concurrent clients (`-np 8`). The system prompt is shared (`-pps`), meaning that it is computed once at the start. The client requests consist of up to 10 junk questions (`--junk 10`) followed by the actual question. ```bash llama-parallel -m model.gguf -np 8 -ns 128 --top-k 1 -pps --junk 10 -c 16384 diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp index acb1301a2b..d7b269df0d 100644 --- a/examples/parallel/parallel.cpp +++ b/examples/parallel/parallel.cpp @@ -315,7 +315,10 @@ int main(int argc, char ** argv) { } else { client.prompt += k_system; } - for (int i = 0; i < n_junk; ++i) { + + const int n_junk_cur = rand() % n_junk; + + for (int i = 0; i < n_junk_cur; ++i) { const int r = rand() % k_questions.size(); client.prompt += "User:\n" + k_questions[r] + "\nAssistant:\n " + k_answers[r] + "\n"; } @@ -340,7 +343,7 @@ int main(int argc, char ** argv) { client.n_decoded = 0; client.i_batch = batch.n_tokens - 1; - LOG_INF("\033[31mClient %3d, seq %4d, started decoding ...\033[0m\n", client.id, client.seq_id); + LOG_INF("\033[31mClient %3d, seq %4d, junk = %4d, started decoding ...\033[0m\n", client.id, client.seq_id, n_junk_cur); g_seq_id += 1; @@ -359,7 +362,9 @@ int main(int argc, char ** argv) { // process in chunks of params.n_batch int32_t n_batch = params.n_batch; - for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) { + int32_t i_next = 0; + + for (int32_t i = 0; i < batch.n_tokens; i = i_next) { // experiment: process in powers of 2 //if (i + n_batch > (int32_t) batch.n_tokens && n_batch > 32) { // n_batch /= 2; @@ -367,7 +372,7 @@ int main(int argc, char ** argv) { // continue; //} - const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i)); + const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i); llama_batch batch_view = { n_tokens, @@ -387,19 +392,24 @@ int main(int argc, char ** argv) { return 1; } - LOG_ERR("%s : failed to decode the batch, retrying with n_batch = %d\n", __func__, n_batch / 2); + LOG_WRN("%s : failed to decode the batch, retrying with n_batch = %d\n", __func__, n_batch / 2); n_cache_miss += 1; // retry with half the batch size to try to find a free slot in the KV cache n_batch /= 2; - i -= n_batch; continue; } LOG_DBG("%s : decoded batch of %d tokens\n", __func__, n_tokens); + // move the head of the batch forward with the number of tokens we just processed + i_next = i + n_tokens; + + // on successful decode, restore the original batch size + n_batch = params.n_batch; + for (auto & client : clients) { if (client.i_batch < (int) i || client.i_batch >= (int) (i + n_tokens)) { continue; diff --git a/examples/passkey/passkey.cpp b/examples/passkey/passkey.cpp index 347ea4a698..5ac881b45e 100644 --- a/examples/passkey/passkey.cpp +++ b/examples/passkey/passkey.cpp @@ -133,9 +133,8 @@ int main(int argc, char ** argv) { const int ib = i/n_batch - 1; const int bd = n_batch_grp*(n_grp - 1); - llama_kv_self_seq_add (ctx, 0, n_past - n_batch, n_past, ib*bd); - llama_kv_self_seq_div (ctx, 0, n_past - n_batch + ib*bd, n_past + ib*bd, n_grp); - llama_kv_self_update (ctx); + llama_kv_self_seq_add(ctx, 0, n_past - n_batch, n_past, ib*bd); + llama_kv_self_seq_div(ctx, 0, n_past - n_batch + ib*bd, n_past + ib*bd, n_grp); n_past = llama_kv_self_seq_pos_max(ctx, 0) + 1; } @@ -169,8 +168,6 @@ int main(int argc, char ** argv) { llama_kv_self_seq_rm (ctx, 0, n_keep , 
n_keep + n_discard); llama_kv_self_seq_add(ctx, 0, n_keep + n_discard, n_ctx, -n_discard); - //llama_kv_self_defrag (ctx); - llama_kv_self_update (ctx); n_past = llama_kv_self_seq_pos_max(ctx, 0) + 1; @@ -200,8 +197,6 @@ int main(int argc, char ** argv) { llama_kv_self_seq_rm (ctx, 0, n_keep , n_keep + n_discard); llama_kv_self_seq_add(ctx, 0, n_keep + n_discard, n_ctx, -n_discard); - //llama_kv_self_defrag (ctx); - llama_kv_self_update (ctx); n_past = llama_kv_self_seq_pos_max(ctx, 0) + 1; } diff --git a/ggml/cmake/common.cmake b/ggml/cmake/common.cmake index 1976d0ae9b..bb1ec9b37a 100644 --- a/ggml/cmake/common.cmake +++ b/ggml/cmake/common.cmake @@ -24,3 +24,28 @@ function(ggml_get_flags CCID CCVER) set(GF_C_FLAGS ${C_FLAGS} PARENT_SCOPE) set(GF_CXX_FLAGS ${CXX_FLAGS} PARENT_SCOPE) endfunction() + +function(ggml_get_system_arch) + if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR + CMAKE_GENERATOR_PLATFORM_LWR STREQUAL "arm64" OR + (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND + CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm.*|ARM64)$")) + set(GGML_SYSTEM_ARCH "ARM" PARENT_SCOPE) + elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR + CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR + (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND + CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64|amd64)$")) + set(GGML_SYSTEM_ARCH "x86" PARENT_SCOPE) + elseif ("${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "ppc64le " OR + "${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "powerpc ") + set(GGML_SYSTEM_ARCH "PowerPC" PARENT_SCOPE) + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") + set(GGML_SYSTEM_ARCH "loongarch64" PARENT_SCOPE) + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "riscv64") + set(GGML_SYSTEM_ARCH "riscv64" PARENT_SCOPE) + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x") + set(GGML_SYSTEM_ARCH "s390x" PARENT_SCOPE) + else() + set(GGML_SYSTEM_ARCH "UNKNOWN" PARENT_SCOPE) + endif() +endfunction() diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index ddea5ad389..5681ecddba 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -109,6 +109,8 @@ if (MSVC) else () set(CMAKE_GENERATOR_PLATFORM_LWR "") endif () +ggml_get_system_arch() +message(STATUS "GGML_SYSTEM_ARCH: ${GGML_SYSTEM_ARCH}") if (NOT MSVC) if (GGML_STATIC) @@ -287,16 +289,20 @@ if (GGML_CPU_ALL_VARIANTS) if (NOT GGML_BACKEND_DL) message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS requires GGML_BACKEND_DL") endif() - ggml_add_cpu_backend_variant(x64) - ggml_add_cpu_backend_variant(sse42 SSE42) - ggml_add_cpu_backend_variant(sandybridge SSE42 AVX) - ggml_add_cpu_backend_variant(haswell SSE42 AVX F16C AVX2 BMI2 FMA) - ggml_add_cpu_backend_variant(skylakex SSE42 AVX F16C AVX2 BMI2 FMA AVX512) - ggml_add_cpu_backend_variant(icelake SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI) - ggml_add_cpu_backend_variant(alderlake SSE42 AVX F16C AVX2 BMI2 FMA AVX_VNNI) - if (NOT MSVC) - # MSVC doesn't support AMX - ggml_add_cpu_backend_variant(sapphirerapids SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) + if (GGML_SYSTEM_ARCH STREQUAL "x86") + ggml_add_cpu_backend_variant(x64) + ggml_add_cpu_backend_variant(sse42 SSE42) + ggml_add_cpu_backend_variant(sandybridge SSE42 AVX) + ggml_add_cpu_backend_variant(haswell SSE42 AVX F16C AVX2 BMI2 FMA) + ggml_add_cpu_backend_variant(skylakex SSE42 AVX F16C AVX2 BMI2 FMA AVX512) + ggml_add_cpu_backend_variant(icelake SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI) + 
ggml_add_cpu_backend_variant(alderlake SSE42 AVX F16C AVX2 BMI2 FMA AVX_VNNI) + if (NOT MSVC) + # MSVC doesn't support AMX + ggml_add_cpu_backend_variant(sapphirerapids SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) + endif() + else() + message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS not yet supported on ${GGML_SYSTEM_ARCH}") endif() elseif (GGML_CPU) ggml_add_cpu_backend_variant_impl("") diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp index 1f40f10e87..b1050ad59c 100644 --- a/ggml/src/ggml-backend.cpp +++ b/ggml/src/ggml-backend.cpp @@ -1340,7 +1340,10 @@ static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) { // allocate graph if (backend_ids_changed || !ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) { // the re-allocation may cause the split inputs to be moved to a different address - ggml_backend_sched_synchronize(sched); + // synchronize without ggml_backend_sched_synchronize to avoid changing cur_copy + for (int i = 0; i < sched->n_backends; i++) { + ggml_backend_synchronize(sched->backends[i]); + } #ifndef NDEBUG GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed); #endif @@ -1564,7 +1567,6 @@ bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgra ggml_backend_sched_split_graph(sched, graph); - if (!ggml_backend_sched_alloc_splits(sched)) { return false; } @@ -1598,9 +1600,12 @@ void ggml_backend_sched_synchronize(ggml_backend_sched_t sched) { for (int i = 0; i < sched->n_backends; i++) { ggml_backend_synchronize(sched->backends[i]); } - // reset the current copy to 0 so that the graphs will be similar during generation - // necessary for CUDA graphs - sched->cur_copy = 0; + if (!sched->is_alloc) { + // if the graph is not already allocated, always use copy 0 after a synchronization + // this ensures that during generation the same copy is used every time, + // which avoids changes in the graph that could cause CUDA or other graphs to be disabled + sched->cur_copy = 0; + } } void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) { diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index bf4fe79a95..b3237eeadd 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -82,13 +82,8 @@ function(ggml_add_cpu_backend_variant_impl tag_name) target_link_libraries(${GGML_CPU_NAME} PUBLIC memkind) endif() - if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR - CMAKE_GENERATOR_PLATFORM_LWR STREQUAL "arm64" OR - (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND - CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm.*|ARM64)$")) - + if (GGML_SYSTEM_ARCH STREQUAL "ARM") message(STATUS "ARM detected") - if (MSVC AND NOT CMAKE_C_COMPILER_ID STREQUAL "Clang") message(FATAL_ERROR "MSVC is not supported for ARM, use clang") else() @@ -170,12 +165,8 @@ function(ggml_add_cpu_backend_variant_impl tag_name) endforeach() endif() endif() - elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR - (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND - CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64|amd64)$")) - + elseif (GGML_SYSTEM_ARCH STREQUAL "x86") message(STATUS "x86 detected") - if (MSVC) # instruction set detection for MSVC only if (GGML_NATIVE) @@ -318,7 +309,7 @@ 
function(ggml_add_cpu_backend_variant_impl tag_name) set_target_properties(${GGML_CPU_FEATS_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON) target_link_libraries(${GGML_CPU_NAME} PRIVATE ${GGML_CPU_FEATS_NAME}) endif() - elseif ("${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "ppc64le " OR "${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "powerpc ") + elseif (GGML_SYSTEM_ARCH STREQUAL "PowerPC") message(STATUS "PowerPC detected") if (GGML_NATIVE) if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64") @@ -344,9 +335,8 @@ function(ggml_add_cpu_backend_variant_impl tag_name) list(APPEND ARCH_FLAGS -mcpu=${GGML_CPU_POWERPC_CPUTYPE}) endif() endif() - elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") + elseif (GGML_SYSTEM_ARCH STREQUAL "loongarch64") message(STATUS "loongarch64 detected") - list(APPEND ARCH_FLAGS -march=loongarch64) if (GGML_LASX) list(APPEND ARCH_FLAGS -mlasx) @@ -354,8 +344,8 @@ function(ggml_add_cpu_backend_variant_impl tag_name) if (GGML_LSX) list(APPEND ARCH_FLAGS -mlsx) endif() - elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "riscv64") - message(STATUS "RISC-V detected") + elseif (GGML_SYSTEM_ARCH STREQUAL "riscv64") + message(STATUS "riscv64 detected") if (GGML_RVV) if (GGML_XTHEADVECTOR) list(APPEND ARCH_FLAGS -march=rv64gc_xtheadvector -mabi=lp64d) @@ -365,7 +355,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) list(APPEND ARCH_FLAGS -march=rv64gcv -mabi=lp64d) endif() endif() - elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x") + elseif (GGML_SYSTEM_ARCH STREQUAL "s390x") message(STATUS "s390x detected") file(READ "/proc/cpuinfo" CPUINFO_CONTENTS) string(REGEX REPLACE "machine[ \t\r\n]*=[ \t\r\n]*([0-9]+)" "\\1" S390X_M ${CPUINFO_CONTENTS}) diff --git a/ggml/src/ggml-cpu/ggml-cpu-quants.c b/ggml/src/ggml-cpu/ggml-cpu-quants.c index fe4a5a8336..40bded4767 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-quants.c +++ b/ggml/src/ggml-cpu/ggml-cpu-quants.c @@ -6995,7 +6995,11 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); +#ifdef __ARM_FEATURE_MATMUL_INT8 + assert((nrc == 2) || (nrc == 1)); +#else assert(nrc == 1); +#endif UNUSED(nrc); UNUSED(bx); UNUSED(by); @@ -7012,6 +7016,146 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi uint32_t utmp[4]; +#if defined(__ARM_FEATURE_MATMUL_INT8) + if (nrc == 2) { + const block_q4_K * GGML_RESTRICT x0 = x; + const block_q4_K * GGML_RESTRICT x1 = (const block_q4_K *) ((const uint8_t *)vx + bx); + const block_q8_K * GGML_RESTRICT y0 = y; + const block_q8_K * GGML_RESTRICT y1 = (const block_q8_K *) ((const uint8_t *)vy + by); + + const uint8x16_t m4b = vdupq_n_u8(0x0f); + + float32x4_t vfsum = vdupq_n_f32(0.0f); + + for (int i = 0; i < nb; ++i, ++x0, ++x1, ++y0, ++y1) { + const uint8_t * GGML_RESTRICT qx0 = x0->qs; + const uint8_t * GGML_RESTRICT qx1 = x1->qs; + const int8_t * GGML_RESTRICT qy0 = y0->qs; + const int8_t * GGML_RESTRICT qy1 = y1->qs; + + // decode scales and mins + int8_t x0_scales[8], x1_scales[8]; + int16x8_t x0_mins, x1_mins; + { + uint32_t scales_mins[3]; + memcpy(scales_mins, x0->scales, 12); + const uint32_t mins_0_3 = scales_mins[1] & kmask1; + const uint32_t mins_4_7 = ((scales_mins[2] >> 4) & kmask2) | (((scales_mins[1] >> 6) & kmask3) << 4); + const uint32x2_t mins = {mins_0_3, mins_4_7}; + x0_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins))); + uint32_t scales[2]; + 
scales[0] = scales_mins[0] & kmask1; // scales 0~3 + scales[1] = (scales_mins[2] & kmask2) | (((scales_mins[0] >> 6) & kmask3) << 4); // scales 4~7 + memcpy(x0_scales, scales, 8); + } + { + uint32_t scales_mins[3]; + memcpy(scales_mins, x1->scales, 12); + const uint32_t mins_0_3 = scales_mins[1] & kmask1; + const uint32_t mins_4_7 = ((scales_mins[2] >> 4) & kmask2) | (((scales_mins[1] >> 6) & kmask3) << 4); + const uint32x2_t mins = {mins_0_3, mins_4_7}; + x1_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins))); + uint32_t scales[2]; + scales[0] = scales_mins[0] & kmask1; // scales 0~3 + scales[1] = (scales_mins[2] & kmask2) | (((scales_mins[0] >> 6) & kmask3) << 4); // scales 4~7 + memcpy(x1_scales, scales, 8); + } + + int32x4_t visum = {0}; + + // process 64 data points per iteration, totally 256 data points + for (int j = 0; j < QK_K / 64; ++j, qx0 += 32, qx1 += 32, qy0 += 64, qy1 += 64) { + const int8x16x4_t vy0 = vld1q_s8_x4(qy0); + const int8x16x4_t vy1 = vld1q_s8_x4(qy1); + + int8x16_t vx0[4], vx1[4]; + { + const uint8x16x2_t vv = vld1q_u8_x2(qx0); + vx0[0] = vreinterpretq_s8_u8(vandq_u8(vv.val[0], m4b)); + vx0[1] = vreinterpretq_s8_u8(vandq_u8(vv.val[1], m4b)); + vx0[2] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[0], 4)); + vx0[3] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[1], 4)); + } + { + const uint8x16x2_t vv = vld1q_u8_x2(qx1); + vx1[0] = vreinterpretq_s8_u8(vandq_u8(vv.val[0], m4b)); + vx1[1] = vreinterpretq_s8_u8(vandq_u8(vv.val[1], m4b)); + vx1[2] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[0], 4)); + vx1[3] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[1], 4)); + } + + // process 32 data points (share same block scale) per iteration + for (int k = 0; k < 2; ++k) { + const int blk = j * 2 + k; + const int32x4_t block_scale = { + x0_scales[blk], + x0_scales[blk], + x1_scales[blk], + x1_scales[blk], + }; + + int32x4_t vr = {0}; + for (int l = 0; l < 2; ++l) { + const int idx = k * 2 + l; + const int64x2_t vx0_s64 = vreinterpretq_s64_s8(vx0[idx]); + const int64x2_t vx1_s64 = vreinterpretq_s64_s8(vx1[idx]); + const int64x2_t vy0_s64 = vreinterpretq_s64_s8(vy0.val[idx]); + const int64x2_t vy1_s64 = vreinterpretq_s64_s8(vy1.val[idx]); + const int8x16_t vx_l = vreinterpretq_s8_s64(vzip1q_s64(vx0_s64, vx1_s64)); + const int8x16_t vx_h = vreinterpretq_s8_s64(vzip2q_s64(vx0_s64, vx1_s64)); + const int8x16_t vy_l = vreinterpretq_s8_s64(vzip1q_s64(vy0_s64, vy1_s64)); + const int8x16_t vy_h = vreinterpretq_s8_s64(vzip2q_s64(vy0_s64, vy1_s64)); + vr = vmmlaq_s32(vr, vx_l, vy_l); + vr = vmmlaq_s32(vr, vx_h, vy_h); + } + // apply block scale, will NOT overflow + // block_scale * sum_256(int4*int8) <= 2^(8+8+4+8) = 28 bits + visum = vmlaq_s32(visum, vr, block_scale); + } + } + + // adjust bias, apply superblock scale + { + int32_t bias[4]; + // no obvious uplift from sve sdot-16, just use neon mul add + const int16x8_t y0_sums = vpaddq_s16(vld1q_s16(y0->bsums), vld1q_s16(y0->bsums+8)); + const int16x8_t y1_sums = vpaddq_s16(vld1q_s16(y1->bsums), vld1q_s16(y1->bsums+8)); + bias[0] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y0_sums), vget_low_s16(x0_mins)), + vmull_s16(vget_high_s16(y0_sums), vget_high_s16(x0_mins)))); + bias[1] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), vget_low_s16(x0_mins)), + vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x0_mins)))); + bias[2] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y0_sums), vget_low_s16(x1_mins)), + vmull_s16(vget_high_s16(y0_sums), vget_high_s16(x1_mins)))); + bias[3] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), 
vget_low_s16(x1_mins)), + vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x1_mins)))); + const float32x4_t dmins = { + GGML_FP16_TO_FP32(x0->dmin) * y0->d, + GGML_FP16_TO_FP32(x0->dmin) * y1->d, + GGML_FP16_TO_FP32(x1->dmin) * y0->d, + GGML_FP16_TO_FP32(x1->dmin) * y1->d, + }; + vfsum = vmlsq_f32(vfsum, vcvtq_f32_s32(vld1q_s32(bias)), dmins); + + const float32x4_t superblock_scale = { + GGML_FP16_TO_FP32(x0->d) * y0->d, + GGML_FP16_TO_FP32(x0->d) * y1->d, + GGML_FP16_TO_FP32(x1->d) * y0->d, + GGML_FP16_TO_FP32(x1->d) * y1->d, + }; + vfsum = vmlaq_f32(vfsum, vcvtq_f32_s32(visum), superblock_scale); + } + } + + // vfsum = ABCD -> ACBD + // AC -> s, BD -> (s+bs) + vfsum = vzip1q_f32(vfsum, vextq_f32(vfsum, vfsum, 2)); + vst1_f32(s, vget_low_f32 (vfsum)); + vst1_f32(s + bs, vget_high_f32(vfsum)); + + return; + } +#endif + #ifdef __ARM_FEATURE_SVE float sumf = 0; for (int i = 0; i < nb; ++i) { diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index aa51dc21a5..1dc425fef1 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -270,7 +270,11 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = { .from_float = quantize_row_q4_K, .vec_dot = ggml_vec_dot_q4_K_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, +#if defined (__ARM_FEATURE_MATMUL_INT8) + .nrows = 2, +#else .nrows = 1, +#endif }, [GGML_TYPE_Q5_K] = { .from_float = quantize_row_q5_K, diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index 26501b7118..d8de7531b0 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -7633,39 +7633,83 @@ static void ggml_compute_forward_ssm_scan_f32( const int ir1 = MIN(ir0 + dr, nr); const int ir = ir1 - ir0; - for (int i3 = 0; i3 < n_s; ++i3) { - for (int i2 = 0; i2 < n_t; ++i2) { - const float * s0 = (const float *) ((const char *) src0->data + ir0*(src0->nb[1]) + i3*(src0->nb[2])); // {d_state, d_inner, n_s} - const float * x = (const float *) ((const char *) src1->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} - const float * dt = (const float *) ((const char *) src2->data + ir0*(src2->nb[0]) + i2*(src2->nb[1]) + i3*(src2->nb[2])); // {d_inner, n_t, n_s} - const float * A = (const float *) ((const char *) src3->data + ir0*(src3->nb[1])); // {d_state, d_inner} - const float * B = (const float *) ((const char *) src4->data + i2*(src4->nb[1]) + i3*(src4->nb[2])); // {d_state, n_t, n_s} - const float * C = (const float *) ((const char *) src5->data + i2*(src5->nb[1]) + i3*(src5->nb[2])); // {d_state, n_t, n_s} - float * y = ( float *) (( char *) dst->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} - float * s = ( float *) (( char *) dst->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]) + src1->nb[3]); // {d_state, d_inner, n_s} + #ifdef __ARM_FEATURE_SVE + for (int i3 = 0; i3 < n_s; ++i3) { + for (int i2 = 0; i2 < n_t; ++i2) { + const float * s0 = (const float *) ((const char *) src0->data + ir0*(src0->nb[1]) + i3*(src0->nb[2])); // {d_state, d_inner, n_s} + const float * x = (const float *) ((const char *) src1->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} + const float * dt = (const float *) ((const char *) src2->data + ir0*(src2->nb[0]) + i2*(src2->nb[1]) + i3*(src2->nb[2])); // {d_inner, n_t, n_s} + const float * A = (const float *) ((const char *) src3->data + ir0*(src3->nb[1])); // {d_state, d_inner} + const float * B = (const float *) ((const char *) src4->data + i2*(src4->nb[1]) + 
i3*(src4->nb[2])); // {d_state, n_t, n_s} + const float * C = (const float *) ((const char *) src5->data + i2*(src5->nb[1]) + i3*(src5->nb[2])); // {d_state, n_t, n_s} + float * y = ( float *) (( char *) dst->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} + float * s = ( float *) (( char *) dst->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]) + src1->nb[3]); // {d_state, d_inner, n_s} - // use the output as the source for the next token-wise iterations - if (i2 > 0) { s0 = s; } + // use the output as the source for the next token-wise iterations + if (i2 > 0) { s0 = s; } - // d_inner - for (int i1 = 0; i1 < ir; ++i1) { - // ref: https://github.com/state-spaces/mamba/blob/34076d664838588a3c97727b263478ab9f621a07/mamba_ssm/ops/triton/selective_state_update.py#L78 - float dt_soft_plus = dt[i1] <= 20.0f ? log1pf(expf(dt[i1])) : dt[i1]; - float x_dt = x[i1] * dt_soft_plus; - float sumf = 0.0f; - // d_state - for (int i0 = 0; i0 < nc; ++i0) { - int i = i0 + i1*nc; - // state = prev_state * dA + dB * x - float state = (s0[i] * expf(dt_soft_plus * A[i])) + (B[i0] * x_dt); - // y = rowwise_dotprod(state, C) - sumf += state * C[i0]; - s[i] = state; + // d_inner + for (int i1 = 0; i1 < ir; ++i1) { + float dt_soft_plus = dt[i1] <= 20.0f ? log1pf(expf(dt[i1])) : dt[i1]; + float x_dt = x[i1] * dt_soft_plus; + svfloat32_t vx_dt = GGML_F32_VEC_SET1(x_dt); + svfloat32_t vdt_soft_plus = GGML_F32_VEC_SET1(dt_soft_plus); + svfloat32_t r1_vector = GGML_F32_VEC_ZERO; + + for (int64_t k = 0; k < nc; k += svcntw()) { + svfloat32_t vA = GGML_F32_VEC_LOAD(&A[i1*nc + k]); + svfloat32_t vB = GGML_F32_VEC_LOAD(&B[k]); + svfloat32_t vC = GGML_F32_VEC_LOAD(&C[k]); + svfloat32_t vs0 = GGML_F32_VEC_LOAD(&s0[i1*nc + k]); + + svfloat32_t t1 = GGML_F32_VEC_MUL(vdt_soft_plus, vA); + t1 = exp_ps_sve(svptrue_b32(), t1); + svfloat32_t t2 = GGML_F32_VEC_MUL(vx_dt, vB); + + vs0 = GGML_F32_VEC_FMA(vs0, t1, t2); + r1_vector = GGML_F32_VEC_ADD(GGML_F32_VEC_MUL(vs0, vC), r1_vector); + + GGML_F32_VEC_STORE(&s[i1*nc + k], vs0); + } + y[i1] = GGML_F32xt_REDUCE_ONE(r1_vector); } - y[i1] = sumf; } } - } + #else + for (int i3 = 0; i3 < n_s; ++i3) { + for (int i2 = 0; i2 < n_t; ++i2) { + const float * s0 = (const float *) ((const char *) src0->data + ir0*(src0->nb[1]) + i3*(src0->nb[2])); // {d_state, d_inner, n_s} + const float * x = (const float *) ((const char *) src1->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} + const float * dt = (const float *) ((const char *) src2->data + ir0*(src2->nb[0]) + i2*(src2->nb[1]) + i3*(src2->nb[2])); // {d_inner, n_t, n_s} + const float * A = (const float *) ((const char *) src3->data + ir0*(src3->nb[1])); // {d_state, d_inner} + const float * B = (const float *) ((const char *) src4->data + i2*(src4->nb[1]) + i3*(src4->nb[2])); // {d_state, n_t, n_s} + const float * C = (const float *) ((const char *) src5->data + i2*(src5->nb[1]) + i3*(src5->nb[2])); // {d_state, n_t, n_s} + float * y = ( float *) (( char *) dst->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} + float * s = ( float *) (( char *) dst->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]) + src1->nb[3]); // {d_state, d_inner, n_s} + + // use the output as the source for the next token-wise iterations + if (i2 > 0) { s0 = s; } + + // d_inner + for (int i1 = 0; i1 < ir; ++i1) { + // ref: https://github.com/state-spaces/mamba/blob/34076d664838588a3c97727b263478ab9f621a07/mamba_ssm/ops/triton/selective_state_update.py#L78 + float 
dt_soft_plus = dt[i1] <= 20.0f ? log1pf(expf(dt[i1])) : dt[i1]; + float x_dt = x[i1] * dt_soft_plus; + float sumf = 0.0f; + // d_state + for (int i0 = 0; i0 < nc; ++i0) { + int i = i0 + i1*nc; + // state = prev_state * dA + dB * x + float state = (s0[i] * expf(dt_soft_plus * A[i])) + (B[i0] * x_dt); + // y = rowwise_dotprod(state, C) + sumf += state * C[i0]; + s[i] = state; + } + y[i1] = sumf; + } + } + } + #endif } void ggml_compute_forward_ssm_scan( @@ -8070,6 +8114,14 @@ static void ggml_compute_forward_rwkv_wkv6_f32( #define GGML_F32X_MUL GGML_F32x16_MUL #define GGML_F32X_FMA GGML_F32x16_FMA #define WKV_VECTOR_SIZE 16 + #elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) + #define GGML_F32X GGML_F32xt + #define GGML_F32X_SET1 GGML_F32xt_SET1 + #define GGML_F32X_LOAD GGML_F32xt_LOAD + #define GGML_F32X_STORE GGML_F32xt_STORE + #define GGML_F32X_MUL GGML_F32xt_MUL + #define GGML_F32X_FMA GGML_F32xt_FMA + #define WKV_VECTOR_SIZE 8 #elif defined(__ARM_NEON) && defined(__aarch64__) #define GGML_F32X GGML_F32x4 #define GGML_F32X_SET1 GGML_F32x4_SET1 @@ -8080,8 +8132,14 @@ static void ggml_compute_forward_rwkv_wkv6_f32( #define WKV_VECTOR_SIZE 4 #endif + int wkv_vector_size; #ifdef WKV_VECTOR_SIZE - const int64_t vec_count = head_size / WKV_VECTOR_SIZE; + #if defined(__ARM_FEATURE_SVE) + wkv_vector_size = svcntw(); + #else + wkv_vector_size = WKV_VECTOR_SIZE; + #endif + const int64_t vec_count = head_size / wkv_vector_size; for (int64_t t = 0; t < T; t++) { size_t t_offset = t * t_stride; @@ -8111,7 +8169,7 @@ static void ggml_compute_forward_rwkv_wkv6_f32( GGML_F32X time_decay_vec = GGML_F32X_SET1(time_decay_val); for (int64_t j = 0; j < vec_count; j++) { - size_t base_j = j * WKV_VECTOR_SIZE; + size_t base_j = j * wkv_vector_size; size_t t_h_j_offset = t_h_offset + base_j; size_t h_2d_i_j_offset = h_2d_i_offset + base_j; @@ -8136,7 +8194,7 @@ static void ggml_compute_forward_rwkv_wkv6_f32( } // Handle remaining elements, this will not be used. 
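+ // with SVE the vector length is only known at run time (wkv_vector_size is taken from svcntw() above), so the remainder bound below uses the runtime value rather than the compile-time WKV_VECTOR_SIZE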
- for (int64_t j = vec_count * WKV_VECTOR_SIZE; j < head_size; j++) { + for (int64_t j = vec_count * wkv_vector_size; j < head_size; j++) { size_t t_h_j_offset = t_h_offset + j; size_t h_2d_i_j_offset = h_2d_i_offset + j; float v_val = v[t_h_j_offset]; @@ -8272,6 +8330,14 @@ static void ggml_compute_forward_gla_f32( #define GGML_F32X_MUL GGML_F32x16_MUL #define GGML_F32X_FMA GGML_F32x16_FMA #define GLA_VECTOR_SIZE 16 + #elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) + #define GGML_F32X GGML_F32xt + #define GGML_F32X_SET1 GGML_F32xt_SET1 + #define GGML_F32X_LOAD GGML_F32xt_LOAD + #define GGML_F32X_STORE GGML_F32xt_STORE + #define GGML_F32X_MUL GGML_F32xt_MUL + #define GGML_F32X_FMA GGML_F32xt_FMA + #define GLA_VECTOR_SIZE 8 #elif defined(__ARM_NEON) && defined(__aarch64__) #define GGML_F32X GGML_F32x4 #define GGML_F32X_SET1 GGML_F32x4_SET1 @@ -8282,8 +8348,14 @@ static void ggml_compute_forward_gla_f32( #define GLA_VECTOR_SIZE 4 #endif + int gla_vector_size; #ifdef GLA_VECTOR_SIZE - const int64_t vec_count = head_size / GLA_VECTOR_SIZE; + #if defined(__ARM_FEATURE_SVE) + gla_vector_size = svcntw(); + #else + gla_vector_size = GLA_VECTOR_SIZE; + #endif + const int64_t vec_count = head_size / gla_vector_size; for (int64_t t = 0; t < T; t++) { size_t t_offset = t * t_stride; @@ -8310,7 +8382,7 @@ static void ggml_compute_forward_gla_f32( GGML_F32X g_vec = GGML_F32X_SET1(g_val); for (int64_t j = 0; j < vec_count; j++) { - size_t base_j = j * GLA_VECTOR_SIZE; + size_t base_j = j * gla_vector_size; size_t t_h_j_offset = t_h_offset + base_j; size_t h_2d_i_j_offset = h_2d_i_offset + base_j; @@ -8334,7 +8406,7 @@ } // Handle remaining elements, this will not be used. - for (int64_t j = vec_count * GLA_VECTOR_SIZE; j < head_size; j++) { + for (int64_t j = vec_count * gla_vector_size; j < head_size; j++) { size_t t_h_j_offset = t_h_offset + j; size_t h_2d_i_j_offset = h_2d_i_offset + j; float v_val = v[t_h_j_offset]; @@ -8443,83 +8515,126 @@ static void ggml_compute_forward_rwkv_wkv7_f32( int64_t h_stride_2d = head_size * head_size; #if defined(GGML_SIMD) - for (int64_t t = 0; t < T; t++) { - int64_t t_offset = t * t_stride; - int64_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? state_cur : (float*)dst->src[6]->data + state_offset; + #if defined(__ARM_FEATURE_SVE) + // route to the scalar implementation for now // TODO: write SVE code + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ?
state_cur : (float*)dst->src[6]->data + state_offset; - for (int64_t h = h_start; h < h_end; h++) { - int64_t h_offset = h * h_stride; - int64_t t_h_offset = t_offset + h_offset; - int64_t h_2d_offset = h * h_stride_2d; + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; - for (int64_t ii = 0; ii < head_size; ii++) { - int64_t t_h_i_offset = t_h_offset + ii; - int64_t h_2d_i_offset = h_2d_offset + ii * h_stride; + for (int64_t i = 0; i < head_size; i++) { + int64_t t_h_i_offset = t_h_offset + i; + int64_t h_2d_i_offset = h_2d_offset + i * h_stride; - GGML_F32_VEC v_vec = GGML_F32_VEC_SET1(v[t_h_i_offset]); + float v_val = v[t_h_i_offset]; - float sa = 0; - { - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; - for (int64_t j = 0; j < head_size; j += GGML_F32_STEP) { - for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { - ax[kk] = GGML_F32_VEC_LOAD(&a[t_h_offset + j + kk * GGML_F32_EPR]); - ay[kk] = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_offset + j + kk * GGML_F32_EPR]); - sum[kk] = GGML_F32_VEC_FMA(sum[kk], ax[kk], ay[kk]); - } + float sa = 0, result = 0; + for (int64_t j = 0; j < head_size; j++) { + sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; } - GGML_F32_VEC_REDUCE(sa, sum); - } - GGML_F32_VEC sa_vec = GGML_F32_VEC_SET1(sa); + for (int64_t j = 0; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; - int64_t j = 0; - GGML_F32_VEC result_vec[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - for (; j < head_size; j += GGML_F32_STEP) { - for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { - int64_t t_h_j_offset = t_h_offset + j + kk * GGML_F32_EPR; - int64_t h_2d_i_j_offset = h_2d_i_offset + j + kk * GGML_F32_EPR; - - GGML_F32_VEC r_vec = GGML_F32_VEC_LOAD(&r[t_h_j_offset]); - GGML_F32_VEC w_vec = GGML_F32_VEC_LOAD(&w[t_h_j_offset]); - GGML_F32_VEC k_vec = GGML_F32_VEC_LOAD(&k[t_h_j_offset]); - GGML_F32_VEC b_vec = GGML_F32_VEC_LOAD(&b[t_h_j_offset]); - - k_vec = GGML_F32_VEC_MUL(v_vec, k_vec); - - GGML_F32_VEC state_vec = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_j_offset]); - // kv + s * decay + sa * b - state_vec = GGML_F32_VEC_FMA(k_vec, state_vec, w_vec); - state_vec = GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec); - GGML_F32_VEC_STORE(&state_cur[h_2d_i_j_offset], state_vec); - - result_vec[kk] = GGML_F32_VEC_FMA(result_vec[kk], state_vec, r_vec); + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + result += state_cur[h_2d_i_j_offset] * r_val; } - } - GGML_F32_VEC_REDUCE(dst_data[t_h_i_offset], result_vec); - - // There shouldn't be left-overs though. 
- for (; j < head_size; j++) { - int64_t t_h_j_offset = t_h_offset + j; - int64_t h_2d_i_j_offset = h_2d_i_offset + j; - - float r_val = r[t_h_j_offset]; - float w_val = w[t_h_j_offset]; - float k_val = k[t_h_j_offset]; - float b_val = b[t_h_j_offset]; - float kv_val = v[t_h_i_offset] * k_val; - - float prev_state_val = state_prev[h_2d_i_j_offset]; - state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; - dst_data[t_h_i_offset] += state_cur[h_2d_i_j_offset] * r_val; + dst_data[t_h_i_offset] = result; } } } - } + #else + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float*)dst->src[6]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; + + for (int64_t ii = 0; ii < head_size; ii++) { + int64_t t_h_i_offset = t_h_offset + ii; + int64_t h_2d_i_offset = h_2d_offset + ii * h_stride; + + GGML_F32_VEC v_vec = GGML_F32_VEC_SET1(v[t_h_i_offset]); + + float sa = 0; + { + GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + for (int64_t j = 0; j < head_size; j += GGML_F32_STEP) { + for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { + ax[kk] = GGML_F32_VEC_LOAD(&a[t_h_offset + j + kk * GGML_F32_EPR]); + ay[kk] = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_offset + j + kk * GGML_F32_EPR]); + sum[kk] = GGML_F32_VEC_FMA(sum[kk], ax[kk], ay[kk]); + } + } + GGML_F32_VEC_REDUCE(sa, sum); + } + + GGML_F32_VEC sa_vec = GGML_F32_VEC_SET1(sa); + + int64_t j = 0; + GGML_F32_VEC result_vec[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + for (; j < head_size; j += GGML_F32_STEP) { + for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { + int64_t t_h_j_offset = t_h_offset + j + kk * GGML_F32_EPR; + int64_t h_2d_i_j_offset = h_2d_i_offset + j + kk * GGML_F32_EPR; + + GGML_F32_VEC r_vec = GGML_F32_VEC_LOAD(&r[t_h_j_offset]); + GGML_F32_VEC w_vec = GGML_F32_VEC_LOAD(&w[t_h_j_offset]); + GGML_F32_VEC k_vec = GGML_F32_VEC_LOAD(&k[t_h_j_offset]); + GGML_F32_VEC b_vec = GGML_F32_VEC_LOAD(&b[t_h_j_offset]); + + k_vec = GGML_F32_VEC_MUL(v_vec, k_vec); + + GGML_F32_VEC state_vec = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_j_offset]); + // kv + s * decay + sa * b + state_vec = GGML_F32_VEC_FMA(k_vec, state_vec, w_vec); + state_vec = GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec); + GGML_F32_VEC_STORE(&state_cur[h_2d_i_j_offset], state_vec); + + result_vec[kk] = GGML_F32_VEC_FMA(result_vec[kk], state_vec, r_vec); + } + } + GGML_F32_VEC_REDUCE(dst_data[t_h_i_offset], result_vec); + + // There shouldn't be left-overs though. 
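+ // (head_size is expected to be a multiple of GGML_F32_STEP, so this remainder loop should never have any work to do)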
+ for (; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; + + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v[t_h_i_offset] * k_val; + + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + dst_data[t_h_i_offset] += state_cur[h_2d_i_j_offset] * r_val; + } + } + } + } + #endif #else for (int64_t t = 0; t < T; t++) { int64_t t_offset = t * t_stride; diff --git a/ggml/src/ggml-cpu/simd-mappings.h b/ggml/src/ggml-cpu/simd-mappings.h index 45c31cf1fa..2e3669c018 100644 --- a/ggml/src/ggml-cpu/simd-mappings.h +++ b/ggml/src/ggml-cpu/simd-mappings.h @@ -17,7 +17,123 @@ // number of elements to fit in a single register // -#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA) +#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_FMA) + +#define GGML_SIMD + +// F32 SVE +#define GGML_F32_EPR 8 +#define DEFAULT_PG svptrue_b32() + +#define GGML_F32xt svfloat32_t +#define GGML_F32xt_ZERO svdup_n_f32(0.0f) +#define GGML_F32xt_SET1(x) svdup_n_f32(x) +#define GGML_F32xt_LOAD_IMPL(pg, a, ...) svld1_f32(pg, a) +#define GGML_F32xt_LOAD(...) GGML_F32xt_LOAD_IMPL(DEFAULT_PG, __VA_ARGS__) +#define GGML_F32xt_STORE_IMPL(pg,a,b) svst1_f32(pg, a, b) +#define GGML_F32xt_STORE(...) GGML_F32xt_STORE_IMPL(DEFAULT_PG, __VA_ARGS__) +#define GGML_F32xt_FMA_IMPL(pg, a, b, c) svmad_f32_m(pg, a, b, c) +#define GGML_F32xt_FMA(...) GGML_F32xt_FMA_IMPL(DEFAULT_PG, __VA_ARGS__) +#define GGML_F32xt_ADD_IMPL(pg, a, b) svadd_f32_m(pg, a, b) +#define GGML_F32xt_ADD(...) GGML_F32xt_ADD_IMPL(DEFAULT_PG, __VA_ARGS__) +#define GGML_F32xt_MUL_IMPL(pg, a, b) svmul_f32_m(pg, a, b) +#define GGML_F32xt_MUL(...) GGML_F32xt_MUL_IMPL(DEFAULT_PG, __VA_ARGS__) +#define GGML_F32xt_REDUCE_ONE_IMPL(pg, a) svaddv(pg, a) +#define GGML_F32xt_REDUCE_ONE(...) GGML_F32xt_REDUCE_ONE_IMPL(DEFAULT_PG, __VA_ARGS__) +#define GGML_F32xt_REDUCE_IMPL(pg, res, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8) \ +{ \ + sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum2); \ + sum3 = svadd_f32_m(DEFAULT_PG, sum3, sum4); \ + sum5 = svadd_f32_m(DEFAULT_PG, sum5, sum6); \ + sum7 = svadd_f32_m(DEFAULT_PG, sum7, sum8); \ + sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum3); \ + sum5 = svadd_f32_m(DEFAULT_PG, sum5, sum7); \ + sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum5); \ + (res) = (ggml_float) GGML_F32xt_REDUCE_ONE(sum1); \ +} +#define GGML_F32xt_REDUCE(...) 
GGML_F32xt_REDUCE_IMPL(DEFAULT_PG, __VA_ARGS__) + +#define GGML_F32_VEC GGML_F32xt +#define GGML_F32_VEC_ZERO GGML_F32xt_ZERO +#define GGML_F32_VEC_SET1 GGML_F32xt_SET1 +#define GGML_F32_VEC_LOAD GGML_F32xt_LOAD +#define GGML_F32_VEC_STORE GGML_F32xt_STORE +#define GGML_F32_VEC_FMA GGML_F32xt_FMA +#define GGML_F32_VEC_ADD GGML_F32xt_ADD +#define GGML_F32_VEC_MUL GGML_F32xt_MUL +#define GGML_F32_VEC_REDUCE GGML_F32xt_REDUCE + +// F16 NEON + +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + #define GGML_F16_STEP 32 + #define GGML_F16_EPR 8 + + #define GGML_F16x8 float16x8_t + #define GGML_F16x8_ZERO vdupq_n_f16(0.0f) + #define GGML_F16x8_SET1(x) vdupq_n_f16(x) + #define GGML_F16x8_LOAD(x) vld1q_f16((const __fp16 *)(x)) + #define GGML_F16x8_STORE vst1q_f16 + #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c) + #define GGML_F16x8_ADD vaddq_f16 + #define GGML_F16x8_MUL vmulq_f16 + #define GGML_F16x8_REDUCE(res, x) \ + do { \ + int offset = GGML_F16_ARR >> 1; \ + for (int i = 0; i < offset; ++i) { \ + (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ + } \ + const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 ((x)[0])); \ + const float32x4_t t1 = vcvt_f32_f16(vget_high_f16((x)[0])); \ + (res) = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \ + } while (0) + + #define GGML_F16_VEC GGML_F16x8 + #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO + #define GGML_F16_VEC_SET1 GGML_F16x8_SET1 + #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p) + #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE((__fp16 *)(p), (r)[i]) + #define GGML_F16_VEC_FMA GGML_F16x8_FMA + #define GGML_F16_VEC_ADD GGML_F16x8_ADD + #define GGML_F16_VEC_MUL GGML_F16x8_MUL + #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE +#else + // if FP16 vector arithmetic is not supported, we use FP32 instead + // and take advantage of the vcvt_ functions to convert to/from FP16 + + #define GGML_F16_STEP 16 + #define GGML_F16_EPR 4 + + #define GGML_F32Cx4 float32x4_t + #define GGML_F32Cx4_ZERO vdupq_n_f32(0.0f) + #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x) + #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16((const __fp16 *)(x))) + #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y)) + #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c) + #define GGML_F32Cx4_ADD vaddq_f32 + #define GGML_F32Cx4_MUL vmulq_f32 + #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE + + #define GGML_F16_VEC GGML_F32Cx4 + #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO + #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 + #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) + #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE((__fp16 *)(p), r[i]) + #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA + #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD + #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL + #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE +#endif + +#elif defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA) #define GGML_SIMD diff --git a/ggml/src/ggml-cpu/vec.cpp b/ggml/src/ggml-cpu/vec.cpp index 02d4061822..f7614568ea 100644 --- a/ggml/src/ggml-cpu/vec.cpp +++ b/ggml/src/ggml-cpu/vec.cpp @@ -17,29 +17,98 @@ void ggml_vec_dot_f32(int n, float * GGML_RESTRICT s, size_t bs, const float * G #if defined(GGML_SIMD) float sumf = 0.0f; - const int np = (n & ~(GGML_F32_STEP - 1)); - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + #if defined(__ARM_FEATURE_SVE) + const int sve_register_length = 
ggml_cpu_get_sve_cnt() * 8; + const int ggml_f32_epr = sve_register_length / 32; // SVE128:4, SVE256:8, SVE512:16 + const int ggml_f32_step = 8 * ggml_f32_epr; // choose 8 SVE registers - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; + const int np = (n & ~(ggml_f32_step - 1)); + svfloat32_t sum1 = svdup_n_f32(0.0f); + svfloat32_t sum2 = svdup_n_f32(0.0f); + svfloat32_t sum3 = svdup_n_f32(0.0f); + svfloat32_t sum4 = svdup_n_f32(0.0f); + svfloat32_t sum5 = svdup_n_f32(0.0f); + svfloat32_t sum6 = svdup_n_f32(0.0f); + svfloat32_t sum7 = svdup_n_f32(0.0f); + svfloat32_t sum8 = svdup_n_f32(0.0f); + svfloat32_t ax1,ax2,ax3,ax4,ax5,ax6,ax7,ax8; + svfloat32_t ay1,ay2,ay3,ay4,ay5,ay6,ay7,ay8; + for (int i = 0; i < np; i += ggml_f32_step) { + ax1 = GGML_F32_VEC_LOAD(x + i); + ay1 = GGML_F32_VEC_LOAD(y + i); + sum1 = GGML_F32_VEC_FMA(ax1, ay1, sum1); - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); + ax2 = GGML_F32_VEC_LOAD(x + i + 1*ggml_f32_epr); + ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr); + sum2 = GGML_F32_VEC_FMA(ax2, ay2, sum2); - sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]); + ax3 = GGML_F32_VEC_LOAD(x + i + 2*ggml_f32_epr); + ay3 = GGML_F32_VEC_LOAD(y + i + 2*ggml_f32_epr); + sum3 = GGML_F32_VEC_FMA(ax3, ay3, sum3); + + ax4 = GGML_F32_VEC_LOAD(x + i + 3*ggml_f32_epr); + ay4 = GGML_F32_VEC_LOAD(y + i + 3*ggml_f32_epr); + sum4 = GGML_F32_VEC_FMA(ax4, ay4, sum4); + + ax5 = GGML_F32_VEC_LOAD(x + i + 4*ggml_f32_epr); + ay5 = GGML_F32_VEC_LOAD(y + i + 4*ggml_f32_epr); + sum5 = GGML_F32_VEC_FMA(ax5, ay5, sum5); + + ax6 = GGML_F32_VEC_LOAD(x + i + 5*ggml_f32_epr); + ay6 = GGML_F32_VEC_LOAD(y + i + 5*ggml_f32_epr); + sum6 = GGML_F32_VEC_FMA(ax6, ay6, sum6); + + ax7 = GGML_F32_VEC_LOAD(x + i + 6*ggml_f32_epr); + ay7 = GGML_F32_VEC_LOAD(y + i + 6*ggml_f32_epr); + sum7 = GGML_F32_VEC_FMA(ax7, ay7, sum7); + + ax8 = GGML_F32_VEC_LOAD(x + i + 7*ggml_f32_epr); + ay8 = GGML_F32_VEC_LOAD(y + i + 7*ggml_f32_epr); + sum8 = GGML_F32_VEC_FMA(ax8, ay8, sum8); } + // leftovers + // Since the loop above is unrolled 8x, the leftovers lie in [0, ggml_f32_step) and are handled by the loop below + const int np2 = (n & ~(ggml_f32_epr - 1)); + for (int i = np; i < np2; i += ggml_f32_epr) { + ax1 = GGML_F32_VEC_LOAD(x + i); + ay1 = GGML_F32_VEC_LOAD(y + i); + sum1 = GGML_F32_VEC_FMA(ax1, ay1, sum1); + }
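+ // svwhilelt_b32(np2, n) keeps a lane active while (np2 + lane) < n, so the final partial vector is accumulated with predicated loads and a predicated FMA instead of a scalar tail loop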
+ // maximum number of leftover elements will be less than ggml_f32_epr. Apply predicated svmad on available elements only + if (np2 < n) { + svbool_t pg = svwhilelt_b32(np2, n); + ax1 = svld1_f32(pg, x + np2); + ay1 = svld1_f32(pg, y + np2); + sum1 = svmad_f32_m(pg, ax1, ay1, sum1); + } + // reduce sum1..sum8 into sumf + GGML_F32_VEC_REDUCE(sumf, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8); + #else + const int np = (n & ~(GGML_F32_STEP - 1)); - // reduce sum0..sum3 to sum0 - GGML_F32_VEC_REDUCE(sumf, sum); + GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - // leftovers - for (int i = np; i < n; ++i) { - sumf += x[i]*y[i]; - } + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + + for (int i = 0; i < np; i += GGML_F32_STEP) { + for (int j = 0; j < GGML_F32_ARR; j++) { + ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); + ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); + + sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]); + } + } + + // reduce sum0..sum3 to sum0 + GGML_F32_VEC_REDUCE(sumf, sum); + + // leftovers + for (int i = np; i < n; ++i) { + sumf += x[i]*y[i]; + } + #endif #else // scalar ggml_float sumf = 0.0; diff --git a/ggml/src/ggml-cpu/vec.h b/ggml/src/ggml-cpu/vec.h index c77349ebe4..09dbade217 100644 --- a/ggml/src/ggml-cpu/vec.h +++ b/ggml/src/ggml-cpu/vec.h @@ -5,6 +5,7 @@ #include "ggml-impl.h" #include "simd-mappings.h" #include "ggml.h" +#include "ggml-cpu.h" #if defined(GGML_USE_ACCELERATE) #include <Accelerate/Accelerate.h> @@ -148,27 +149,108 @@ inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * GG inline static void ggml_vec_mad_f32(const int n, float * GGML_RESTRICT y, const float * GGML_RESTRICT x, const float v) { #if defined(GGML_SIMD) - const int np = (n & ~(GGML_F32_STEP - 1)); + #if defined(__ARM_FEATURE_SVE) - GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + const int sve_register_length = ggml_cpu_get_sve_cnt() * 8; + const int ggml_f32_epr = sve_register_length / 32; // SVE128:4, SVE256:8, SVE512:16 + const int ggml_f32_step = 8 * ggml_f32_epr; // choose 8 SVE registers + GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; + const int np = (n & ~(ggml_f32_step - 1)); + svfloat32_t ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8; + svfloat32_t ay1, ay2, ay3, ay4, ay5, ay6, ay7, ay8; + for (int i = 0; i < np; i += ggml_f32_step) { - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx); + ax1 = GGML_F32_VEC_LOAD(x + i); + ay1 = GGML_F32_VEC_LOAD(y + i); + ay1 = GGML_F32_VEC_FMA(ax1, vx, ay1); - GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + GGML_F32_VEC_STORE(y + i, ay1); + + ax2 = GGML_F32_VEC_LOAD(x + i + 1*ggml_f32_epr); + ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr); + ay2 = GGML_F32_VEC_FMA(ax2, vx, ay2); + + GGML_F32_VEC_STORE(y + i + 1*ggml_f32_epr, ay2); + + ax3 = GGML_F32_VEC_LOAD(x + i + 2*ggml_f32_epr); + ay3 = GGML_F32_VEC_LOAD(y + i + 2*ggml_f32_epr); + ay3 = GGML_F32_VEC_FMA(ax3, vx, ay3); + + GGML_F32_VEC_STORE(y + i + 2*ggml_f32_epr, ay3); + + ax4 = GGML_F32_VEC_LOAD(x + i + 3*ggml_f32_epr); + ay4 = GGML_F32_VEC_LOAD(y + i + 3*ggml_f32_epr); + ay4 = GGML_F32_VEC_FMA(ax4, vx, ay4); + + GGML_F32_VEC_STORE(y + i + 3*ggml_f32_epr, ay4); + + ax5 = GGML_F32_VEC_LOAD(x + i + 4*ggml_f32_epr); + ay5 = GGML_F32_VEC_LOAD(y + i + 4*ggml_f32_epr); + ay5 = GGML_F32_VEC_FMA(ax5, vx, ay5); + + GGML_F32_VEC_STORE(y + i + 4*ggml_f32_epr, ay5); + + ax6 =
GGML_F32_VEC_LOAD(x + i + 5*ggml_f32_epr); + ay6 = GGML_F32_VEC_LOAD(y + i + 5*ggml_f32_epr); + ay6 = GGML_F32_VEC_FMA(ax6, vx, ay6); + + GGML_F32_VEC_STORE(y + i + 5*ggml_f32_epr, ay6); + + ax7 = GGML_F32_VEC_LOAD(x + i + 6*ggml_f32_epr); + ay7 = GGML_F32_VEC_LOAD(y + i + 6*ggml_f32_epr); + ay7 = GGML_F32_VEC_FMA(ax7, vx, ay7); + + GGML_F32_VEC_STORE(y + i + 6*ggml_f32_epr, ay7); + + ax8 = GGML_F32_VEC_LOAD(x + i + 7*ggml_f32_epr); + ay8 = GGML_F32_VEC_LOAD(y + i + 7*ggml_f32_epr); + ay8 = GGML_F32_VEC_FMA(ax8, vx, ay8); + + GGML_F32_VEC_STORE(y + i + 7*ggml_f32_epr, ay8); } + // leftovers + // Since the loop above is unrolled 8x, the leftovers lie in [0, ggml_f32_step) and are handled by the loop below + const int np2 = (n & ~(ggml_f32_epr - 1)); + for (int i = np; i < np2; i += ggml_f32_epr) { + ax1 = GGML_F32_VEC_LOAD(x + i); + ay1 = GGML_F32_VEC_LOAD(y + i); + ay1 = GGML_F32_VEC_FMA(ax1, vx, ay1); - // leftovers - for (int i = np; i < n; ++i) { - y[i] += x[i]*v; - } + GGML_F32_VEC_STORE(y + i, ay1); + } + // maximum number of leftover elements will be less than ggml_f32_epr. Apply predicated svmad on available elements only + if (np2 < n) { + svbool_t pg = svwhilelt_b32(np2, n); + ax1 = svld1_f32(pg, x + np2); + ay1 = svld1_f32(pg, y + np2); + ay1 = svmad_f32_m(pg, ax1, vx, ay1); + + svst1_f32(pg, y + np2, ay1); + } + #else + const int np = (n & ~(GGML_F32_STEP - 1)); + + GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + + for (int i = 0; i < np; i += GGML_F32_STEP) { + for (int j = 0; j < GGML_F32_ARR; j++) { + ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); + ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); + ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx); + + GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + } + } + + // leftovers + for (int i = np; i < n; ++i) { + y[i] += x[i]*v; + } + #endif #else // scalar for (int i = 0; i < n; ++i) { @@ -220,36 +302,45 @@ } #if defined(GGML_SIMD) - const int np = (n & ~(GGML_F32_STEP - 1)); - - GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL]; - - for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { - vx[k] = GGML_F32_VEC_SET1(v[k][0]); - } - - GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; - - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - - for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { - ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]); + #if defined(__ARM_FEATURE_SVE) + // route to the scalar implementation for now // TODO: write SVE code + for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { + for (int i = 0; i < n; ++i) { + y[i] += x[k][i]*v[k][0]; } - - GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + } - } + #else + const int np = (n & ~(GGML_F32_STEP - 1)); - // leftovers - for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { - for (int i = np; i < n; ++i) { - y[i] += x[k][i]*v[k][0]; + GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL]; + + for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { + vx[k] = GGML_F32_VEC_SET1(v[k][0]); } - } + + GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + + for (int i = 0; i < np; i += GGML_F32_STEP) { + for (int j = 0; j < GGML_F32_ARR; j++) { + ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); + + for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { + ax[k][j] =
GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR); + ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]); + } + + GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + } + } + + // leftovers + for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { + for (int i = np; i < n; ++i) { + y[i] += x[k][i]*v[k][0]; + } + } + #endif #else // scalar for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { @@ -265,25 +356,53 @@ inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { #if defined(GGML_USE_ACCELERATE) vDSP_vsmul(y, 1, &v, y, 1, n); #elif defined(GGML_SIMD) - const int np = (n & ~(GGML_F32_STEP - 1)); + #if defined(__ARM_FEATURE_SVE) + const int sve_register_length = ggml_cpu_get_sve_cnt() * 8; + const int ggml_f32_epr = sve_register_length / 32; // SVE128:4, SVE256:8, SVE512:16 + const int ggml_f32_step = 2 * ggml_f32_epr; - GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + const int np = (n & ~(ggml_f32_step - 1)); + svfloat32_t ay1; + svfloat32_t ay2; + for (int i = 0; i < np; i += ggml_f32_step) { + ay1 = GGML_F32_VEC_LOAD(y + i); + ay1 = GGML_F32_VEC_MUL(ay1, vx); + GGML_F32_VEC_STORE(y + i, ay1); - GGML_F32_VEC ay[GGML_F32_ARR]; - - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_MUL(ay[j], vx); - - GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr); + ay2 = GGML_F32_VEC_MUL(ay2, vx); + GGML_F32_VEC_STORE(y + i + 1*ggml_f32_epr, ay2); } - } + // leftovers + // maximum number of leftover elements will be less than ggml_f32_epr. Apply predicated svmul on available elements only + if (np < n) { + svbool_t pg = svwhilelt_b32(np, n); + ay1 = svld1_f32(pg, y + np); + ay1 = svmul_f32_m(pg, ay1, vx); + svst1_f32(pg, y + np, ay1); + } + #else + const int np = (n & ~(GGML_F32_STEP - 1)); - // leftovers - for (int i = np; i < n; ++i) { - y[i] *= v; - } + GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + + GGML_F32_VEC ay[GGML_F32_ARR]; + + for (int i = 0; i < np; i += GGML_F32_STEP) { + for (int j = 0; j < GGML_F32_ARR; j++) { + ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); + ay[j] = GGML_F32_VEC_MUL(ay[j], vx); + + GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + } + } + + // leftovers + for (int i = np; i < n; ++i) { + y[i] *= v; + } + #endif #else // scalar for (int i = 0; i < n; ++i) { @@ -528,6 +647,42 @@ inline static ggml_fp16_t ggml_silu_f16(ggml_fp16_t x) { #error "ref: https://github.com/ggml-org/llama.cpp/pull/7154#issuecomment-2143844461" #endif +/* Below function was borrowed from the GitHub repository: +https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/common.hpp */ +#if defined(__ARM_FEATURE_SVE) && defined(__aarch64__) + inline static svfloat32_t exp_ps_sve(svbool_t pg, svfloat32_t src) { + // Constants + const svfloat32_t log2_e = svdup_n_f32(1.4426950409f); + const svfloat32_t ln2 = svdup_n_f32(0.6931473921f); + const svfloat32_t half_ln2_sq = svdup_n_f32(0.2413862043f); + const svuint32_t not_mask17 = svdup_n_u32(~((1u << 17) - 1)); + const svfloat32_t one = svdup_n_f32(1.0f); + const svfloat32_t inactive1 = svdup_n_f32(0.0f); + const svint32_t inactive2 = svdup_n_s32(0); + + // Algorithm starts here + svfloat32_t t0 = svmul_f32_m(pg, src, log2_e); // y = x * log2(e) + svfloat32_t t1 = svrintm_f32_m(inactive1, pg, t0); // round to int (float) + svint32_t t2 = svcvt_s32_f32_m(inactive2, pg, t1); // n + + t1
= svsub_f32_m(pg, t0, t1); // a = y - floor(y) + t1 = svadd_f32_m(pg, t1, one); // b = a + 1 + + svuint32_t t3 = svlsr_n_u32_m(pg, svreinterpret_u32_f32(t1), 17); // v = b >> 17 (u32) + svfloat32_t t4 = svexpa_f32(t3); // c = fexpa(v) + t4 = svscale_f32_m(pg, t4, t2); // fexpa(v) * 2^(n) + + // and_(t2.d, t1.d, not_mask17.d) + svfloat32_t t5 = svreinterpret_f32_u32(svand_u32_m(pg, svreinterpret_u32_f32(t1), not_mask17)); + t5 = svsub_f32_m(pg, t1, t5); // z + t0 = svmla_f32_m(pg, ln2, t5, half_ln2_sq); // ln2 + half_ln2_sq * z + t0 = svmla_f32_m(pg, one, t5, t0); // 1 + (ln2 * z) + (half_ln2_sq * z * z) + t0 = svmul_f32_m(pg, t0, t4); // Final result + + return t0; + } +#endif + #if defined(__ARM_NEON) && defined(__aarch64__) // adapted from arm limited optimized routine diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index df450b1878..e1ce1d4cd1 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -635,6 +635,7 @@ struct ggml_cuda_device_info { int nsm; // number of streaming multiprocessors size_t smpb; // max. shared memory per block size_t smpbo; // max. shared memory per block (with opt-in) + bool integrated; // Device is integrated as opposed to discrete bool vmm; // virtual memory support size_t vmm_granularity; // granularity of virtual memory size_t total_vram; diff --git a/ggml/src/ggml-cuda/fattn-common.cuh b/ggml/src/ggml-cuda/fattn-common.cuh index a4fbd82363..cfab2b5eba 100644 --- a/ggml/src/ggml-cuda/fattn-common.cuh +++ b/ggml/src/ggml-cuda/fattn-common.cuh @@ -623,8 +623,8 @@ static __global__ void flash_attn_combine_results( __builtin_assume(tid < D); extern __shared__ float2 meta[]; - if (tid < 2*parallel_blocks) { - ((float *) meta)[threadIdx.x] = ((const float *)VKQ_meta) [blockIdx.z*(2*parallel_blocks) + tid]; + for (int i = tid; i < 2*parallel_blocks; i += D) { + ((float *) meta)[i] = ((const float *)VKQ_meta) [blockIdx.z*(2*parallel_blocks) + i]; } __syncthreads(); diff --git a/ggml/src/ggml-cuda/fattn-mma-f16.cuh b/ggml/src/ggml-cuda/fattn-mma-f16.cuh index 7120053b6e..925f39e890 100644 --- a/ggml/src/ggml-cuda/fattn-mma-f16.cuh +++ b/ggml/src/ggml-cuda/fattn-mma-f16.cuh @@ -1246,7 +1246,7 @@ static __global__ void flash_attn_ext_f16( NO_DEVICE_CODE; return; } -#endif __CUDA_ARCH__ == GGML_CUDA_CC_TURING +#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING static_assert(!mla || DKQ >= DV, "MLA needs DKQ >= DV"); diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index c442a64924..2a6f7f108b 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -243,10 +243,10 @@ static ggml_cuda_device_info ggml_cuda_init() { info.default_tensor_split[id] = total_vram; total_vram += prop.totalGlobalMem; - - info.devices[id].nsm = prop.multiProcessorCount; - info.devices[id].smpb = prop.sharedMemPerBlock; - info.devices[id].warp_size = prop.warpSize; + info.devices[id].integrated = prop.integrated; + info.devices[id].nsm = prop.multiProcessorCount; + info.devices[id].smpb = prop.sharedMemPerBlock; + info.devices[id].warp_size = prop.warpSize; #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) info.devices[id].smpbo = prop.sharedMemPerBlock; @@ -1065,6 +1065,10 @@ static const char * ggml_backend_cuda_host_buffer_type_name(ggml_backend_buffer_ GGML_UNUSED(buft); } +static bool ggml_backend_buft_is_cuda_host(ggml_backend_buffer_type_t buft) { + return buft->iface.get_name == ggml_backend_cuda_host_buffer_type_name; +} + static void 
ggml_backend_cuda_host_buffer_free_buffer(ggml_backend_buffer_t buffer) { CUDA_CHECK(cudaFreeHost(buffer->context)); } @@ -2641,6 +2645,8 @@ static void update_cuda_graph_executable(ggml_backend_cuda_context * cuda_ctx) { static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx, ggml_cgraph * cgraph, bool & graph_evaluated_or_captured, bool & use_cuda_graph, bool & cuda_graph_update_required) { + // flag used to determine whether it is an integrated_gpu + const bool integrated = ggml_cuda_info().devices[cuda_ctx->device].integrated; while (!graph_evaluated_or_captured) { // Only perform the graph execution if CUDA graphs are not enabled, or we are capturing the graph. @@ -2659,7 +2665,7 @@ static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx if (node->src[j] != nullptr) { assert(node->src[j]->buffer); assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) || - ggml_backend_buft_is_cuda_split(node->src[j]->buffer->buft)); + ggml_backend_buft_is_cuda_split(node->src[j]->buffer->buft) || (integrated && ggml_backend_buft_is_cuda_host(node->src[j]->buffer->buft))); } } #endif @@ -2994,9 +3000,12 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g { struct ggml_tensor * a = op->src[0]; struct ggml_tensor * b = op->src[1]; - // for small weight matrices the active device can end up without any rows, don't use row split in those cases - // this avoids some edge cases (and the performance would not be good anyways) if (a->buffer && ggml_backend_buft_is_cuda_split(a->buffer->buft)) { + if (a->ne[2] > 1 || a->ne[3] > 1) { + return false; + } + // for small weight matrices the active device can end up without any rows, don't use row split in those cases + // this avoids some edge cases (and the performance would not be good anyways) ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *) a->buffer->buft->context; int64_t row_low; int64_t row_high; @@ -3263,7 +3272,9 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g } static bool ggml_backend_cuda_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { - return (ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft)) && buft->device == dev; + ggml_backend_cuda_device_context * dev_ctx = (ggml_backend_cuda_device_context *) dev->context; + const bool integrated = ggml_cuda_info().devices[dev_ctx->device].integrated; + return (((ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft)) && buft->device == dev) || (integrated && ggml_backend_buft_is_cuda_host(buft))); } static int64_t get_op_batch_size(const ggml_tensor * op) { diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index e96e1f2488..bcd2ea5366 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -4257,14 +4257,6 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_SOFT_MAX: return true; case GGML_OP_ROPE: - { - const int mode = ((const int32_t *) op->op_params)[2]; - // mode is not used as a bitmask in practice, the various rope type modes are independent implementations - if (mode == GGML_ROPE_TYPE_MROPE) { - return false; - } - return true; - } case GGML_OP_IM2COL: return true; case GGML_OP_UPSCALE: diff --git a/ggml/src/ggml-sycl/rope.cpp b/ggml/src/ggml-sycl/rope.cpp index a6516a7e1b..44473e1e55 100644 --- 
a/ggml/src/ggml-sycl/rope.cpp +++ b/ggml/src/ggml-sycl/rope.cpp @@ -49,10 +49,7 @@ static void rope_norm(const T * x, T * dst, const int ne0, const int ne1, const if (i0 >= n_dims) { const int i = row * ne0 + i0; - - dst[i + 0] = x[i + 0]; - dst[i + 1] = x[i + 1]; - + *reinterpret_cast<sycl::vec<T, 2> *>(dst + i) = *reinterpret_cast<const sycl::vec<T, 2> *>(x + i); return; } @@ -93,10 +90,7 @@ static void rope_neox(const T * x, T * dst, const int ne0, const int ne1, const if (i0 >= n_dims) { const int i = row * ne0 + i0; - - dst[i + 0] = x[i + 0]; - dst[i + 1] = x[i + 1]; - + *reinterpret_cast<sycl::vec<T, 2> *>(dst + i) = *reinterpret_cast<const sycl::vec<T, 2> *>(x + i); return; } @@ -122,6 +116,63 @@ static void rope_neox(const T * x, T * dst, const int ne0, const int ne1, const dst[i + n_dims / 2] = x0 * sin_theta + x1 * cos_theta; } +template <typename T, bool has_ff> +static void rope_multi(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1, + const size_t s2, const int n_dims, const int32_t * pos, const float freq_scale, + const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims, + const float theta_scale, const float * freq_factors, const mrope_sections sections, + const sycl::nd_item<3> & item_ct1) { + // get index pos + const int i0 = 2 * (item_ct1.get_group(1) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1)); + if (i0 >= ne0) { + return; + } + const int row_dst = (item_ct1.get_group(2) * item_ct1.get_local_range(2)) + item_ct1.get_local_id(2); + + if (i0 >= n_dims) { + const int i = row_dst*ne0 + i0; + *reinterpret_cast<sycl::vec<T, 2> *>(dst + i) = *reinterpret_cast<const sycl::vec<T, 2> *>(x + i); + return; + } + + const int row_x = row_dst % ne1; + const int channel_x = row_dst / ne1; + const int idst = (row_dst * ne0) + (i0 / 2); + const size_t ix = ((size_t) channel_x * s2) + ((size_t) row_x * s1) + (i0 / 2); + + const int sect_dims = sections.v[0] + sections.v[1] + sections.v[2] + sections.v[3]; + const int sec_w = sections.v[1] + sections.v[0]; + const int sector = (i0 / 2) % sect_dims; + + + float theta_base = 0.0; + if (sector < sections.v[0]) { + theta_base = pos[channel_x]*sycl::pow(theta_scale, i0/2.0f); + } + else if (sector >= sections.v[0] && sector < sec_w) { + theta_base = pos[channel_x + ne2 * 1]*sycl::pow(theta_scale, i0/2.0f); + } + else if (sector >= sec_w && sector < sec_w + sections.v[2]) { + theta_base = pos[channel_x + ne2 * 2]*sycl::pow(theta_scale, i0/2.0f); + } + else if (sector >= sec_w + sections.v[2]) { + theta_base = pos[channel_x + ne2 * 3]*sycl::pow(theta_scale, i0/2.0f); + } + + const float freq_factor = has_ff ? freq_factors[i0 / 2] : 1.0f; + float cos_theta; + float sin_theta; + rope_yarn(theta_base / freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta); + const float x0 = x[ix + 0]; + const float x1 = x[ix + n_dims/2]; + + // store results in dst + dst[idst + 0] = x0 * cos_theta - x1 * sin_theta; + dst[idst + n_dims/2] = x0 * sin_theta + x1 * cos_theta; +}
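+ // (each sector of the rotated dimensions, as partitioned by the four mrope_sections entries, reads its position from a different row of pos; presumably the temporal/height/width/extra components of M-RoPE)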
+ + + template <typename T, bool has_ff> static void rope_vision(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1, const size_t s2, const int n_dims, const int32_t * pos, const float freq_scale, @@ -171,7 +222,7 @@ static void rope_norm_sycl(const T * x, T * dst, const int ne0, const int ne1, c const float * freq_factors, queue_ptr stream) { GGML_ASSERT(ne0 % 2 == 0); const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1); - const int num_blocks_x = (ne0 + 2 * SYCL_ROPE_BLOCK_SIZE - 1) / (2 * SYCL_ROPE_BLOCK_SIZE); + const int num_blocks_x = ceil_div(ne0, (2 * SYCL_ROPE_BLOCK_SIZE)); const sycl::range<3> block_nums(1, num_blocks_x, nr); const float theta_scale = powf(freq_base, -2.0f / n_dims); @@ -208,7 +259,7 @@ static void rope_neox_sycl(const T * x, T * dst, const int ne0, const int ne1, c const rope_corr_dims corr_dims, const float * freq_factors, queue_ptr stream) { GGML_ASSERT(ne0 % 2 == 0); const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1); - const int num_blocks_x = (ne0 + 2 * SYCL_ROPE_BLOCK_SIZE - 1) / (2 * SYCL_ROPE_BLOCK_SIZE); + const int num_blocks_x = ceil_div(ne0, (2 * SYCL_ROPE_BLOCK_SIZE)); const sycl::range<3> block_nums(1, num_blocks_x, nr); const float theta_scale = powf(freq_base, -2.0f / n_dims); @@ -228,6 +279,40 @@ static void rope_neox_sycl(const T * x, T * dst, const int ne0, const int ne1, c } } +template <typename T> +static void rope_multi_sycl(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1, + const size_t s2, const int n_dims, const int nr, const int32_t * pos, + const float freq_scale, const float freq_base, const float ext_factor, + const float attn_factor, const rope_corr_dims corr_dims, const float * freq_factors, + const mrope_sections sections, queue_ptr stream) { + GGML_ASSERT(ne0 % 2 == 0); + const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1); + const int n_blocks_y = ceil_div(ne0, (2 * SYCL_ROPE_BLOCK_SIZE)); + const sycl::range<3> grid_dims(1, n_blocks_y, nr); + const sycl::nd_range<3> nd_range(grid_dims * block_dims, block_dims); + + const float theta_scale = std::pow(freq_base, -2.0f / n_dims); + // Add FP16 capability check if T could be sycl::half + if constexpr (std::is_same_v<T, sycl::half>) { + dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); + } + // launch kernel + if (freq_factors == nullptr) { + stream->parallel_for(nd_range, [=](sycl::nd_item<3> item_ct1) { + rope_multi<T, false>(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, + corr_dims, theta_scale, freq_factors, sections, item_ct1); + }); + } else { + stream->parallel_for(nd_range, [=](sycl::nd_item<3> item_ct1) { + rope_multi<T, true>(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, + corr_dims, theta_scale, freq_factors, sections, item_ct1); + }); + } +} + + + +
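+ // has_ff is a compile-time flag, so the freq_factors null check is made once per launch (the two parallel_for branches above) rather than per element inside the kernel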
// rope vision template <typename T> static void rope_vision_sycl(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1, @@ -237,7 +322,7 @@ static void rope_vision_sycl(const T * x, T * dst, const int ne0, const int ne1, const mrope_sections sections, queue_ptr stream) { GGML_ASSERT(ne0 % 2 == 0); const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1); - const int n_blocks_y = (ne0 + 2 * SYCL_ROPE_BLOCK_SIZE - 1) / (2 * SYCL_ROPE_BLOCK_SIZE); + const int n_blocks_y = ceil_div(ne0, (2 * SYCL_ROPE_BLOCK_SIZE)); const sycl::range<3> grid_dims(1, n_blocks_y, nr); const sycl::nd_range<3> nd_range(grid_dims * block_dims, block_dims); @@ -298,8 +383,17 @@ inline void ggml_sycl_op_rope(ggml_backend_sycl_context & ctx, ggml_tensor *dst) memcpy(&sections.v, (int32_t *) dst->op_params + 11, sizeof(int)*4); const bool is_neox = mode & GGML_ROPE_TYPE_NEOX; + const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE; const bool is_vision = mode == GGML_ROPE_TYPE_VISION; + if (is_mrope) { + GGML_ASSERT(sections.v[0] > 0 || sections.v[1] > 0 || sections.v[2] > 0); + } + + if (is_vision) { + GGML_ASSERT(n_dims == ne00/2); + } + const int32_t * pos = (const int32_t *) dst->src[1]->data; const float * freq_factors = nullptr; @@ -326,6 +420,19 @@ inline void ggml_sycl_op_rope(ggml_backend_sycl_context & ctx, ggml_tensor *dst) } else { GGML_ABORT("fatal error"); } + } else if (is_mrope && !is_vision) { + GGML_SYCL_DEBUG("%s: mrope path\n", __func__); + if (dst->src[0]->type == GGML_TYPE_F16) { + rope_multi_sycl((const sycl::half *)dst->src[0]->data, (sycl::half *)dst->data, ne00, ne01, ne02, s01, + s02, n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, + freq_factors, sections, main_stream); + } else if (dst->src[0]->type == GGML_TYPE_F32) { + rope_multi_sycl((const float *) dst->src[0]->data, (float *) dst->data, ne00, ne01, ne02, s01, s02, n_dims, + nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, sections, + main_stream); + } else { + GGML_ABORT("Fatal error: Tensor type unsupported!"); + } } else if (is_vision) { GGML_SYCL_DEBUG("%s: vision path\n", __func__); if (dst->src[0]->type == GGML_TYPE_F16) { diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 31163effad..3ee2b2064e 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -177,6 +177,9 @@ class Keys: EMBEDDING_LENGTH = "{arch}.convnext.embedding_length" BLOCK_COUNT = "{arch}.convnext.block_count" + class Classifier: + OUTPUT_LABELS = "{arch}.classifier.output_labels" + class Tokenizer: MODEL = "tokenizer.ggml.model" PRE = "tokenizer.ggml.pre" @@ -1033,6 +1036,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.POS_EMBD, MODEL_TENSOR.OUTPUT_NORM, MODEL_TENSOR.ATTN_OUT_NORM, + MODEL_TENSOR.ATTN_QKV, MODEL_TENSOR.ATTN_Q, MODEL_TENSOR.ATTN_K, MODEL_TENSOR.ATTN_V, diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index e507b7019d..de6e45ae82 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -49,6 +49,7 @@ class TensorInfo: class GGUFValue: value: Any type: GGUFValueType + sub_type: GGUFValueType | None = None class WriterState(Enum): @@ -238,7 +239,7 @@ class GGUFWriter: for key, val in kv_data.items(): kv_bytes += self._pack_val(key, GGUFValueType.STRING, add_vtype=False) - kv_bytes += self._pack_val(val.value, val.type, add_vtype=True) + kv_bytes += self._pack_val(val.value, val.type, add_vtype=True, sub_type=val.sub_type) fout.write(kv_bytes) @@ -268,11 +269,11 @@ class GGUFWriter: fout.flush() self.state = WriterState.TI_DATA - def add_key_value(self, key: str, val: Any, vtype: GGUFValueType) -> None: + def add_key_value(self, key: str, val: Any, vtype: GGUFValueType, sub_type: GGUFValueType | None = None) -> None: if any(key in kv_data for kv_data in self.kv_data): raise
ValueError(f'Duplicated key name {key!r}') - self.kv_data[0][key] = GGUFValue(value=val, type=vtype) + self.kv_data[0][key] = GGUFValue(value=val, type=vtype, sub_type=sub_type) def add_uint8(self, key: str, val: int) -> None: self.add_key_value(key,val, GGUFValueType.UINT8) @@ -1022,7 +1023,7 @@ class GGUFWriter: pack_prefix = '<' if self.endianess == GGUFEndian.LITTLE else '>' return struct.pack(f'{pack_prefix}{fmt}', value) - def _pack_val(self, val: Any, vtype: GGUFValueType, add_vtype: bool) -> bytes: + def _pack_val(self, val: Any, vtype: GGUFValueType, add_vtype: bool, sub_type: GGUFValueType | None = None) -> bytes: kv_data = bytearray() if add_vtype: @@ -1043,7 +1044,9 @@ class GGUFWriter: if len(val) == 0: raise ValueError("Invalid GGUF metadata array. Empty array") - if isinstance(val, bytes): + if sub_type is not None: + ltype = sub_type + elif isinstance(val, bytes): ltype = GGUFValueType.UINT8 else: ltype = GGUFValueType.get_type(val[0]) diff --git a/gguf-py/gguf/scripts/gguf_editor_gui.py b/gguf-py/gguf/scripts/gguf_editor_gui.py index 3d38b5cbab..05f4db0f8c 100755 --- a/gguf-py/gguf/scripts/gguf_editor_gui.py +++ b/gguf-py/gguf/scripts/gguf_editor_gui.py @@ -1521,19 +1521,21 @@ class GGUFEditorWindow(QMainWindow): continue # Apply changes if any + sub_type = None if field.name in self.metadata_changes: value_type, value = self.metadata_changes[field.name] if value_type == GGUFValueType.ARRAY: # Handle array values - element_type, array_values = value - writer.add_array(field.name, array_values) - else: - writer.add_key_value(field.name, value, value_type) + sub_type, value = value else: # Copy original value value = field.contents() - if value is not None and field.types: - writer.add_key_value(field.name, value, field.types[0]) + value_type = field.types[0] + if value_type == GGUFValueType.ARRAY: + sub_type = field.types[-1] + + if value is not None: + writer.add_key_value(field.name, value, value_type, sub_type=sub_type) # Add new metadata for key, (value_type, value) in self.metadata_changes.items(): @@ -1541,7 +1543,12 @@ class GGUFEditorWindow(QMainWindow): if self.reader.get_field(key) is not None: continue - writer.add_key_value(key, value, value_type) + sub_type = None + if value_type == GGUFValueType.ARRAY: + # Handle array values + sub_type, value = value + + writer.add_key_value(key, value, value_type, sub_type=sub_type) # Add tensors (including data) for tensor in self.reader.tensors: diff --git a/gguf-py/gguf/scripts/gguf_new_metadata.py b/gguf-py/gguf/scripts/gguf_new_metadata.py index 7aff6c9259..63f2300348 100755 --- a/gguf-py/gguf/scripts/gguf_new_metadata.py +++ b/gguf-py/gguf/scripts/gguf_new_metadata.py @@ -24,6 +24,7 @@ class MetadataDetails(NamedTuple): type: gguf.GGUFValueType value: Any description: str = '' + sub_type: gguf.GGUFValueType | None = None def get_field_data(reader: gguf.GGUFReader, key: str) -> Any: @@ -57,7 +58,9 @@ def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new logger.debug(f'Removing {field.name}') continue - old_val = MetadataDetails(field.types[0], field.contents()) + val_type = field.types[0] + sub_type = field.types[-1] if val_type == gguf.GGUFValueType.ARRAY else None + old_val = MetadataDetails(val_type, field.contents(), sub_type=sub_type) val = new_metadata.get(field.name, old_val) if field.name in new_metadata: @@ -67,7 +70,7 @@ def copy_with_new_metadata(reader: gguf.GGUFReader, writer: gguf.GGUFWriter, new logger.debug(f'Copying {field.name}') if val.value is not None: - 
writer.add_key_value(field.name, val.value, val.type) + writer.add_key_value(field.name, val.value, val.type, sub_type=sub_type if val.sub_type is None else val.sub_type) if gguf.Keys.Tokenizer.CHAT_TEMPLATE in new_metadata: logger.debug('Adding chat template(s)') diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 000ffd0061..93dd1d8028 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -157,6 +157,7 @@ class TensorNameMap: "h.{bid}.attn.c_attn", # gpt2 "transformer.h.{bid}.mixer.Wqkv", # phi2 "encoder.layers.{bid}.attn.Wqkv", # nomic-bert + "encoder.layers.{bid}.mixer.Wqkv", # jina "model.layers.{bid}.self_attn.qkv_proj", # phi3 "encoder.layers.{bid}.self_attention.query_key_value", # chatglm "transformer.layers.{bid}.attn.qkv_proj", # openelm @@ -168,6 +169,7 @@ class TensorNameMap: "model.layers.{bid}.self_attn.q_proj_no_perm", # llama-custom "layers.{bid}.attention.wq", # llama-pth "encoder.layer.{bid}.attention.self.query", # bert + "transformer.layer.{bid}.attention.q_lin", # distillbert "transformer.h.{bid}.attn.q_proj", # gpt-j "model.layers.layers.{bid}.self_attn.q_proj", # plamo "model.layers.{bid}.attention.wq", # internlm2 @@ -182,6 +184,7 @@ class TensorNameMap: "model.layers.{bid}.self_attn.k_proj_no_perm", # llama-custom "layers.{bid}.attention.wk", # llama-pth "encoder.layer.{bid}.attention.self.key", # bert + "transformer.layer.{bid}.attention.k_lin", # distillbert "transformer.h.{bid}.attn.k_proj", # gpt-j "transformer.h.{bid}.attn.k", # refact "model.layers.layers.{bid}.self_attn.k_proj", # plamo @@ -196,6 +199,7 @@ class TensorNameMap: "model.layers.{bid}.self_attn.v_proj", # llama-hf nemotron olmoe olmo2 phimoe "layers.{bid}.attention.wv", # llama-pth "encoder.layer.{bid}.attention.self.value", # bert + "transformer.layer.{bid}.attention.v_lin", # distillbert "transformer.h.{bid}.attn.v_proj", # gpt-j "transformer.h.{bid}.attn.v", # refact "model.layers.layers.{bid}.self_attn.v_proj", # plamo @@ -216,6 +220,7 @@ class TensorNameMap: "model.layers.{bid}.self_attn.linear_attn", # deci "layers.{bid}.attention.wo", # llama-pth "encoder.layer.{bid}.attention.output.dense", # bert + "transformer.layer.{bid}.attention.out_lin", # distillbert "transformer.h.{bid}.attn.out_proj", # gpt-j "language_model.encoder.layers.{bid}.self_attention.dense", # persimmon "model.layers.{bid}.self_attn.dense", # persimmon @@ -224,6 +229,7 @@ class TensorNameMap: "model.layers.layers.{bid}.self_attn.o_proj", # plamo "model.layers.{bid}.attention.wo", # internlm2 "encoder.layers.{bid}.attn.out_proj", # nomic-bert + "encoder.layers.{bid}.mixer.out_proj", # jina "transformer.decoder_layer.{bid}.multi_head_attention.linear", # Grok "transformer.blocks.{bid}.norm_attn_norm.attn.out_proj", # dbrx "encoder.layers.{bid}.self_attention.dense", # chatglm @@ -235,6 +241,7 @@ class TensorNameMap: # Attention output norm MODEL_TENSOR.ATTN_OUT_NORM: ( "encoder.layer.{bid}.attention.output.LayerNorm", # bert + "transformer.layer.{bid}.sa_layer_norm", # distillbert "encoder.layers.{bid}.norm1", # nomic-bert "transformer.decoder_layer.{bid}.rms_norm_1", # Grok "transformer.blocks.{bid}.norm_attn_norm.norm_2", # dbrx @@ -311,6 +318,7 @@ class TensorNameMap: "model.layers.{bid}.mlp.up_proj", # llama-hf refact nemotron olmo2 "layers.{bid}.feed_forward.w3", # llama-pth "encoder.layer.{bid}.intermediate.dense", # bert + "transformer.layer.{bid}.ffn.lin1", # distillbert "transformer.h.{bid}.mlp.fc_in", # gpt-j "transformer.h.{bid}.mlp.linear_3", # 
refact "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon @@ -394,6 +402,7 @@ class TensorNameMap: "model.layers.{bid}.mlp.down_proj", # llama-hf nemotron olmo2 "layers.{bid}.feed_forward.w2", # llama-pth "encoder.layer.{bid}.output.dense", # bert + "transformer.layer.{bid}.ffn.lin2", # distilbert "transformer.h.{bid}.mlp.fc_out", # gpt-j "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon "model.layers.{bid}.mlp.dense_4h_to_h", # persimmon @@ -455,6 +464,7 @@ class TensorNameMap: MODEL_TENSOR.LAYER_OUT_NORM: ( "encoder.layer.{bid}.output.LayerNorm", # bert + "transformer.layer.{bid}.output_layer_norm", # distilbert "encoder.layers.{bid}.norm2", # nomic-bert "transformer.decoder_layer.{bid}.rms_norm_3", # Grok "encoder.layer.{bid}.mlp.layernorm", # jina-bert-v2 @@ -825,6 +835,7 @@ class TensorNameMap: MODEL_TENSOR.CLS: ( "classifier", # jina "classifier.dense", # roberta + "pre_classifier", # distilbert ), MODEL_TENSOR.CLS_OUT: ( @@ -902,7 +913,6 @@ class TensorNameMap: MODEL_TENSOR.V_MMPROJ_FC: ( "model.connector.modality_projection.proj", # SmolVLM - "multi_modal_projector.linear_1", # llama 4 ), MODEL_TENSOR.V_MMPROJ_MLP: ( diff --git a/gguf-py/gguf/utility.py b/gguf-py/gguf/utility.py index e5251aef8c..00adcbc937 100644 --- a/gguf-py/gguf/utility.py +++ b/gguf-py/gguf/utility.py @@ -231,7 +231,7 @@ class SafetensorRemote: response.raise_for_status() # Get raw byte data - return response.content[:size] + return response.content[slice(size if size > -1 else None)] @classmethod def check_file_exist(cls, url: str) -> bool: diff --git a/gguf-py/pyproject.toml b/gguf-py/pyproject.toml index bb9b86ace7..f11351cba1 100644 --- a/gguf-py/pyproject.toml +++ b/gguf-py/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "gguf" -version = "0.16.3" +version = "0.17.0" description = "Read and write ML models in GGUF for GGML" authors = ["GGML <ggml@ggml.ai>"] packages = [ diff --git a/include/llama.h b/include/llama.h index 01762bea2b..da0f652cfd 100644 --- a/include/llama.h +++ b/include/llama.h @@ -259,9 +259,9 @@ extern "C" { llama_token * token; float * embd; llama_pos * pos; - int32_t * n_seq_id; - llama_seq_id ** seq_id; - int8_t * logits; // TODO: rename this to "output" + int32_t * n_seq_id; // TODO: remove, should belong to only 1 sequence + llama_seq_id ** seq_id; // TODO: become llama_seq_id * seq_id; + int8_t * logits; // TODO: rename this to "output" } llama_batch; enum llama_model_kv_override_type { @@ -366,6 +366,8 @@ extern "C" { bool no_perf; // measure performance timings bool op_offload; // offload host tensor operations to device bool swa_full; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055) + // NOTE: setting to false when n_seq_max > 1 can cause bad performance in some cases + // ref: https://github.com/ggml-org/llama.cpp/pull/13845#issuecomment-2924800573 }; // model quantization parameters @@ -502,6 +504,7 @@ extern "C" { LLAMA_API int32_t llama_model_n_layer (const struct llama_model * model); LLAMA_API int32_t llama_model_n_head (const struct llama_model * model); LLAMA_API int32_t llama_model_n_head_kv (const struct llama_model * model); + LLAMA_API int32_t llama_model_n_swa (const struct llama_model * model); // Get the model's RoPE frequency scaling factor LLAMA_API float llama_model_rope_freq_scale_train(const struct llama_model * model); @@ -652,7 +655,6 @@ extern "C" { // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) // If the
KV cache is RoPEd, the KV data is updated accordingly: // - lazily on next llama_decode() - // - explicitly with llama_kv_self_update() // p0 < 0 : [0, p1] // p1 < 0 : [p0, inf) LLAMA_API void llama_kv_self_seq_add( @@ -665,7 +667,6 @@ extern "C" { // Integer division of the positions by factor of `d > 1` // If the KV cache is RoPEd, the KV data is updated accordingly: // - lazily on next llama_decode() - // - explicitly with llama_kv_self_update() // p0 < 0 : [0, p1] // p1 < 0 : [p0, inf) LLAMA_API void llama_kv_self_seq_div( @@ -677,12 +678,14 @@ extern "C" { // Returns the smallest position present in the KV cache for the specified sequence // This is typically non-zero only for SWA caches + // Note that all positions in the range [pos_min, pos_max] are guaranteed to be present in the KV cache // Return -1 if the sequence is empty LLAMA_API llama_pos llama_kv_self_seq_pos_min( struct llama_context * ctx, llama_seq_id seq_id); // Returns the largest position present in the KV cache for the specified sequence + // Note that all positions in the range [pos_min, pos_max] are guaranteed to be present in the KV cache // Return -1 if the sequence is empty LLAMA_API llama_pos llama_kv_self_seq_pos_max( struct llama_context * ctx, @@ -691,14 +694,15 @@ extern "C" { // Defragment the KV cache // This will be applied: // - lazily on next llama_decode() - // - explicitly with llama_kv_self_update() - LLAMA_API void llama_kv_self_defrag(struct llama_context * ctx); + LLAMA_API DEPRECATED(void llama_kv_self_defrag(struct llama_context * ctx), + "simply remove this call, the context will automatically decide when to do a defragmentation based on 'defrag_thold'"); // Check if the context supports KV cache shifting LLAMA_API bool llama_kv_self_can_shift(const struct llama_context * ctx); // Apply the KV cache updates (such as K-shifts, defragmentation, etc.) - LLAMA_API void llama_kv_self_update(struct llama_context * ctx); + LLAMA_API DEPRECATED(void llama_kv_self_update(struct llama_context * ctx), + "simply remove this call, updates are applied lazily on the next llama_decode()"); // // State / sessions diff --git a/models/ggml-vocab-bert-bge.gguf.inp b/models/ggml-vocab-bert-bge.gguf.inp index 9baf7d77ae..86b934e402 100644 --- a/models/ggml-vocab-bert-bge.gguf.inp +++ b/models/ggml-vocab-bert-bge.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-bert-bge.gguf.out b/models/ggml-vocab-bert-bge.gguf.out index a62566ce75..b1c49672fa 100644 --- a/models/ggml-vocab-bert-bge.gguf.out +++ b/models/ggml-vocab-bert-bge.gguf.out @@ -1,5 +1,5 @@ 29464 2094 1018 1092 2706 - 11865 17875 + 9706 7959 2140 diff --git a/models/ggml-vocab-chameleon.gguf.inp b/models/ggml-vocab-chameleon.gguf.inp deleted file mode 100644 index 9baf7d77ae..0000000000 --- a/models/ggml-vocab-chameleon.gguf.inp +++ /dev/null @@ -1,112 +0,0 @@ -ied 4 ½ months -__ggml_vocab_test__ -Führer -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - - -__ggml_vocab_test__ - - - -__ggml_vocab_test__ - - - - -__ggml_vocab_test__ - - -__ggml_vocab_test__ -Hello world -__ggml_vocab_test__ - Hello world -__ggml_vocab_test__ -Hello World -__ggml_vocab_test__ - Hello World -__ggml_vocab_test__ - Hello World! -__ggml_vocab_test__ -Hello, world! -__ggml_vocab_test__ - Hello, world! 
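For readers unfamiliar with these fixtures: each `models/ggml-vocab-*.gguf.inp` file is a list of tokenizer test prompts separated by `__ggml_vocab_test__` marker lines, and the matching `.gguf.out` file carries one line of expected token IDs per prompt, which is why the `Führer` → `Äpfel` prompt change is always paired with a one-line change in the corresponding `.out` file. A rough sketch of how such a pair can be loaded and zipped together — the pairing logic here is illustrative, not the project's actual test harness:

```python
from pathlib import Path

def load_vocab_tests(inp_path: str, out_path: str):
    # prompts are separated by marker lines; the trailing marker leaves
    # one empty chunk at the end, which is dropped
    chunks = Path(inp_path).read_text(encoding="utf-8").split("__ggml_vocab_test__\n")
    prompts = chunks[:-1] if chunks and chunks[-1] == "" else chunks
    # one whitespace-separated line of expected token ids per prompt
    expected = [[int(tok) for tok in line.split()]
                for line in Path(out_path).read_text(encoding="utf-8").splitlines()]
    assert len(prompts) == len(expected)
    return list(zip(prompts, expected))

# usage: print each prompt next to the token ids it is expected to produce
for prompt, tokens in load_vocab_tests("models/ggml-vocab-gpt-2.gguf.inp",
                                       "models/ggml-vocab-gpt-2.gguf.out"):
    print(repr(prompt), tokens)
```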
-__ggml_vocab_test__ - this is 🦙.cpp -__ggml_vocab_test__ -w048 7tuijk dsdfhu -__ggml_vocab_test__ -нещо на Български -__ggml_vocab_test__ -កាន់តែពិសេសអាចខលចេញ -__ggml_vocab_test__ -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) -__ggml_vocab_test__ -Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello - Hello -__ggml_vocab_test__ - ( -__ggml_vocab_test__ - - = -__ggml_vocab_test__ -' era -__ggml_vocab_test__ -Hello, y'all! How are you 😁 ?我想在apple工作1314151天~ -__ggml_vocab_test__ -!!!!!! -__ggml_vocab_test__ -3 -__ggml_vocab_test__ -33 -__ggml_vocab_test__ -333 -__ggml_vocab_test__ -3333 -__ggml_vocab_test__ -33333 -__ggml_vocab_test__ -333333 -__ggml_vocab_test__ -3333333 -__ggml_vocab_test__ -33333333 -__ggml_vocab_test__ -333333333 -__ggml_vocab_test__ -Cửa Việt -__ggml_vocab_test__ - discards -__ggml_vocab_test__ - - - - - - - - - - - -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL -__ggml_vocab_test__ diff --git a/models/ggml-vocab-chameleon.gguf.out b/models/ggml-vocab-chameleon.gguf.out deleted file mode 100644 index 7c5413fee0..0000000000 --- a/models/ggml-vocab-chameleon.gguf.out +++ /dev/null @@ -1,46 +0,0 @@ - 17245 16604 16403 16604 33583 18355 - 16421 51153 - - 16604 - 16650 - 16650 16604 - 16581 - 16582 - 16582 16582 - 16582 16582 16582 - 16581 16582 - 31596 17394 - 34926 17394 - 31596 18671 - 34926 18671 - 34926 18671 16384 - 31596 16395 17394 16384 - 34926 16395 17394 16384 - 16811 16704 20410 16483 16631 16397 52854 - 16470 16399 16403 16407 16604 16406 35764 38185 51595 22592 26639 - 29479 23955 17012 20103 25527 27670 17408 19005 21473 24774 - 54254 42231 48084 29409 16617 61889 29409 16608 21954 16628 21954 16499 58445 29409 16607 58445 21954 16479 42231 21954 16611 21954 16607 21954 16633 21954 16611 29409 16607 21954 16615 - 52351 16604 16391 25825 16392 23686 16498 39161 18885 16618 16488 30853 16604 16391 54124 17153 25134 16656 18476 26169 16895 16392 62193 16611 16604 16391 24664 17153 57169 16721 16872 17073 17304 28729 16392 - 31596 - 34926 - 16650 31596 - 16650 34926 - 16696 31596 - 16696 31596 16582 16696 31596 - 16604 16391 - 16582 16604 16412 - 16390 22623 - 31596 16395 16712 16390 16828 16384 17674 16769 16732 23686 16607 16604 16414 24427 16623 41809 16495 28999 36469 45292 30197 16400 16402 16400 16403 16400 16404 16400 43969 65211 16636 - 16384 16384 16384 16384 16384 16384 - 16402 - 16402 16402 - 16402 16402 16402 - 16402 16402 16402 16402 - 16402 16402 16402 16402 16402 - 16402 16402 16402 16402 16402 16402 - 16402 16402 16402 16402 16402 16402 16402 - 16402 16402 16402 16402 16402 16402 16402 16402 - 16402 16402 16402 16402 16402 16402 16402 16402 16402 - 16418 19038 16639 16448 24315 33727 16467 - 18765 17981 - 16582 16604 16582 16582 16604 16582 16582 16582 16604 16581 16604 16581 16581 16604 16581 16582 16650 16582 16650 16604 16582 16696 16582 16696 16604 16582 52351 16604 16391 25825 16392 23686 16498 39161 18885 16618 16488 30853 16604 16391 54124 17153 25134 16656 18476 26169 16895 16392 62193 16611 20410 16483 16631 18885 16483 16631 16604 16402 16604 16402 16402 16604 16402 16402 16402 16604 16402 16402 16402 16402 16604 16402 16402 16402 16402 
16402 16604 16402 16402 16402 16402 16402 16402 16604 16402 16402 16402 16402 16402 16402 16402 16604 16402 16402 16402 16402 16402 16402 16402 16402 16604 16402 16397 16402 16604 16402 16397 16397 16402 16604 16402 16397 16397 16397 16402 16604 54254 42231 48084 29409 16617 61889 29409 16608 21954 16628 21954 16499 58445 29409 16607 58445 21954 16479 42231 21954 16611 27683 16607 16604 16414 24427 16623 41809 16495 28999 36469 45292 30197 16400 16402 16400 16403 16400 16404 16400 43969 65211 16636 16604 16396 16396 16396 16396 16396 16396 16412 16412 16412 16412 16412 16412 16412 27268 23955 17012 20103 25527 27670 17408 19005 21473 24774 16604 16390 16390 16390 16390 16390 16390 16447 16447 16447 16447 16447 16447 16447 16385 16385 16385 16385 16397 16397 16397 16397 16397 16397 16384 16384 16384 16384 16384 16384 16414 16414 16414 16414 16414 16414 16687 16390 16690 16992 16604 16390 61797 16733 16390 16466 16986 16395 16604 16390 17879 16732 17811 16414 16604 16390 16428 16804 17811 16687 16390 16683 17190 16728 16395 16604 16390 16419 16732 16945 16991 25251 16414 17119 16390 38127 16641 16390 16459 16427 diff --git a/models/ggml-vocab-command-r.gguf.inp b/models/ggml-vocab-command-r.gguf.inp index 9baf7d77ae..86b934e402 100644 --- a/models/ggml-vocab-command-r.gguf.inp +++ b/models/ggml-vocab-command-r.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-command-r.gguf.out b/models/ggml-vocab-command-r.gguf.out index 3f6b418888..0e3af72eb1 100644 --- a/models/ggml-vocab-command-r.gguf.out +++ b/models/ggml-vocab-command-r.gguf.out @@ -1,5 +1,5 @@ 2536 228 27 228 22957 6983 - 45 193433 + 90711 87 20910 228 1667 diff --git a/models/ggml-vocab-deepseek-coder.gguf.inp b/models/ggml-vocab-deepseek-coder.gguf.inp index 9baf7d77ae..86b934e402 100644 --- a/models/ggml-vocab-deepseek-coder.gguf.inp +++ b/models/ggml-vocab-deepseek-coder.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-deepseek-coder.gguf.out b/models/ggml-vocab-deepseek-coder.gguf.out index 52c4111a18..ef6bc5b8a3 100644 --- a/models/ggml-vocab-deepseek-coder.gguf.out +++ b/models/ggml-vocab-deepseek-coder.gguf.out @@ -1,5 +1,5 @@ 1050 207 19 207 19192 4217 - 37 32009 71 6247 + 125 213 26862 282 207 243 diff --git a/models/ggml-vocab-deepseek-llm.gguf.inp b/models/ggml-vocab-deepseek-llm.gguf.inp index 9baf7d77ae..86b934e402 100644 --- a/models/ggml-vocab-deepseek-llm.gguf.inp +++ b/models/ggml-vocab-deepseek-llm.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-deepseek-llm.gguf.out b/models/ggml-vocab-deepseek-llm.gguf.out index 0191b7a115..f9d49c9afe 100644 --- a/models/ggml-vocab-deepseek-llm.gguf.out +++ b/models/ggml-vocab-deepseek-llm.gguf.out @@ -1,5 +1,5 @@ 1052 207 19 207 19109 4223 - 37 100014 71 6245 + 82077 26723 282 207 243 diff --git a/models/ggml-vocab-deepseek-r1-qwen.gguf.inp b/models/ggml-vocab-deepseek-r1-qwen.gguf.inp deleted file mode 100644 index 9baf7d77ae..0000000000 --- a/models/ggml-vocab-deepseek-r1-qwen.gguf.inp +++ /dev/null @@ -1,112 +0,0 @@ -ied 4 ½ months -__ggml_vocab_test__ -Führer -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - - -__ggml_vocab_test__ - - - -__ggml_vocab_test__ - - - - -__ggml_vocab_test__ - - 
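Stepping back to the `gguf-py/gguf/utility.py` hunk earlier in this patch: the rewrite from `response.content[:size]` to `response.content[slice(size if size > -1 else None)]` fixes a real sentinel bug, because when `size` is `-1` the old slice meant "everything but the last byte" rather than "everything". A self-contained illustration with plain bytes:

```python
data = b"\x00\x01\x02\x03"

def tail_old(buf: bytes, size: int) -> bytes:
    return buf[:size]                               # size == -1 -> buf[:-1], drops a byte

def tail_new(buf: bytes, size: int) -> bytes:
    return buf[slice(size if size > -1 else None)]  # size == -1 -> slice(None), full buffer

assert tail_old(data, 2) == tail_new(data, 2) == b"\x00\x01"
assert tail_old(data, -1) == b"\x00\x01\x02"        # old behaviour: silently truncated
assert tail_new(data, -1) == data                   # new behaviour: whole payload
```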
-__ggml_vocab_test__ -Hello world -__ggml_vocab_test__ - Hello world -__ggml_vocab_test__ -Hello World -__ggml_vocab_test__ - Hello World -__ggml_vocab_test__ - Hello World! -__ggml_vocab_test__ -Hello, world! -__ggml_vocab_test__ - Hello, world! -__ggml_vocab_test__ - this is 🦙.cpp -__ggml_vocab_test__ -w048 7tuijk dsdfhu -__ggml_vocab_test__ -нещо на Български -__ggml_vocab_test__ -កាន់តែពិសេសអាចខលចេញ -__ggml_vocab_test__ -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) -__ggml_vocab_test__ -Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello - Hello -__ggml_vocab_test__ - ( -__ggml_vocab_test__ - - = -__ggml_vocab_test__ -' era -__ggml_vocab_test__ -Hello, y'all! How are you 😁 ?我想在apple工作1314151天~ -__ggml_vocab_test__ -!!!!!! -__ggml_vocab_test__ -3 -__ggml_vocab_test__ -33 -__ggml_vocab_test__ -333 -__ggml_vocab_test__ -3333 -__ggml_vocab_test__ -33333 -__ggml_vocab_test__ -333333 -__ggml_vocab_test__ -3333333 -__ggml_vocab_test__ -33333333 -__ggml_vocab_test__ -333333333 -__ggml_vocab_test__ -Cửa Việt -__ggml_vocab_test__ - discards -__ggml_vocab_test__ - - - - - - - - - - - -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL -__ggml_vocab_test__ diff --git a/models/ggml-vocab-deepseek-r1-qwen.gguf.out b/models/ggml-vocab-deepseek-r1-qwen.gguf.out deleted file mode 100644 index 18b4b45cd1..0000000000 --- a/models/ggml-vocab-deepseek-r1-qwen.gguf.out +++ /dev/null @@ -1,46 +0,0 @@ - 1122 220 19 220 26062 3951 - 37 50753 261 - - 220 - 256 - 262 - 197 - 198 - 271 - 1406 - 1572 - 9707 1879 - 21927 1879 - 9707 4337 - 21927 4337 - 21927 4337 0 - 9707 11 1879 0 - 21927 11 1879 0 - 419 374 11162 99 247 13 10821 - 86 15 19 23 220 22 83 1963 41808 11472 2940 16739 - 78762 14144 1456 13073 63471 33594 3038 133178 79012 - 146394 97529 241 44258 233 146568 44258 224 147603 20879 115 146280 44258 223 146280 147272 97529 227 147805 148301 147270 44258 223 146848 - 145836 320 8252 8 26525 114 378 235 149921 30543 320 35673 99066 97534 8 25521 227 320 3243 42365 429 702 1181 1828 3950 8 - 9707 - 21927 - 220 21927 - 256 21927 - 262 21927 - 262 21927 198 262 21927 - 320 - 198 284 - 6 11385 - 9707 11 379 64848 0 2585 525 498 26525 223 937 104100 18493 22377 99257 16 18 16 19 16 20 16 35727 21216 - 17085 2928 - 18 - 18 18 - 18 18 18 - 18 18 18 18 - 18 18 18 18 18 - 18 18 18 18 18 18 - 18 18 18 18 18 18 18 - 18 18 18 18 18 18 18 18 - 18 18 18 18 18 18 18 18 18 - 34 90063 128324 - 2560 2347 - 198 4710 14731 65497 7847 1572 2303 78672 10947 145836 320 8252 8 26525 114 378 235 149921 30543 320 35673 99066 97534 8 25521 227 11162 99 247 149955 220 18 220 18 18 220 18 18 18 220 18 18 18 18 220 18 18 18 18 18 220 18 18 18 18 18 18 220 18 18 18 18 18 18 18 220 18 18 18 18 18 18 18 18 220 18 13 18 220 18 496 18 220 18 1112 18 220 146394 97529 241 44258 233 146568 44258 224 147603 20879 115 146280 44258 223 146280 147272 97529 227 144534 937 104100 18493 22377 99257 16 18 16 19 16 20 16 35727 21216 55460 53237 18658 14144 1456 13073 63471 33594 3038 133178 79012 3355 4605 4605 13874 13874 73594 3014 3014 28149 17085 2928 26610 7646 358 3003 1012 364 83 813 566 594 1052 11 364 787 498 2704 30 364 44 537 2704 358 3278 1281 
432 11 364 35 498 1075 1045 15243 30 1205 6 42612 264 63866 43 diff --git a/models/ggml-vocab-falcon.gguf.inp b/models/ggml-vocab-falcon.gguf.inp index 9baf7d77ae..86b934e402 100644 --- a/models/ggml-vocab-falcon.gguf.inp +++ b/models/ggml-vocab-falcon.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-falcon.gguf.out b/models/ggml-vocab-falcon.gguf.out index 64a48d97f7..6319de60e2 100644 --- a/models/ggml-vocab-falcon.gguf.out +++ b/models/ggml-vocab-falcon.gguf.out @@ -1,5 +1,5 @@ 878 204 31 3068 133 2137 - 28611 132 30042 + 34502 18614 286 204 258 diff --git a/models/ggml-vocab-gpt-2.gguf.inp b/models/ggml-vocab-gpt-2.gguf.inp index 9baf7d77ae..86b934e402 100644 --- a/models/ggml-vocab-gpt-2.gguf.inp +++ b/models/ggml-vocab-gpt-2.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-gpt-2.gguf.out b/models/ggml-vocab-gpt-2.gguf.out index 17a13bdfc3..6464ded3d2 100644 --- a/models/ggml-vocab-gpt-2.gguf.out +++ b/models/ggml-vocab-gpt-2.gguf.out @@ -1,5 +1,5 @@ 798 604 25208 1933 - 37 9116 71 11751 + 127 226 79 69 417 220 220 220 diff --git a/models/ggml-vocab-gpt-4o.gguf.inp b/models/ggml-vocab-gpt-4o.gguf.inp deleted file mode 100644 index 9baf7d77ae..0000000000 --- a/models/ggml-vocab-gpt-4o.gguf.inp +++ /dev/null @@ -1,112 +0,0 @@ -ied 4 ½ months -__ggml_vocab_test__ -Führer -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - - -__ggml_vocab_test__ - - - -__ggml_vocab_test__ - - - - -__ggml_vocab_test__ - - -__ggml_vocab_test__ -Hello world -__ggml_vocab_test__ - Hello world -__ggml_vocab_test__ -Hello World -__ggml_vocab_test__ - Hello World -__ggml_vocab_test__ - Hello World! -__ggml_vocab_test__ -Hello, world! -__ggml_vocab_test__ - Hello, world! -__ggml_vocab_test__ - this is 🦙.cpp -__ggml_vocab_test__ -w048 7tuijk dsdfhu -__ggml_vocab_test__ -нещо на Български -__ggml_vocab_test__ -កាន់តែពិសេសអាចខលចេញ -__ggml_vocab_test__ -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) -__ggml_vocab_test__ -Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello - Hello -__ggml_vocab_test__ - ( -__ggml_vocab_test__ - - = -__ggml_vocab_test__ -' era -__ggml_vocab_test__ -Hello, y'all! How are you 😁 ?我想在apple工作1314151天~ -__ggml_vocab_test__ -!!!!!! -__ggml_vocab_test__ -3 -__ggml_vocab_test__ -33 -__ggml_vocab_test__ -333 -__ggml_vocab_test__ -3333 -__ggml_vocab_test__ -33333 -__ggml_vocab_test__ -333333 -__ggml_vocab_test__ -3333333 -__ggml_vocab_test__ -33333333 -__ggml_vocab_test__ -333333333 -__ggml_vocab_test__ -Cửa Việt -__ggml_vocab_test__ - discards -__ggml_vocab_test__ - - - - - - - - - - - -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? 
We'Ve a'lL -__ggml_vocab_test__ diff --git a/models/ggml-vocab-gpt-4o.gguf.out b/models/ggml-vocab-gpt-4o.gguf.out deleted file mode 100644 index 478df726fa..0000000000 --- a/models/ggml-vocab-gpt-4o.gguf.out +++ /dev/null @@ -1,46 +0,0 @@ - 1165 220 19 220 27124 5503 - 37 19194 259 - - 220 - 256 - 271 - 197 - 198 - 279 - 2499 - 2775 - 13225 2375 - 32949 2375 - 13225 5922 - 32949 5922 - 32949 5922 0 - 13225 11 2375 0 - 32949 11 2375 0 - 495 382 9552 99 247 13 17159 - 86 45404 220 22 10191 2852 22924 4750 6916 - 3907 53641 1235 185386 8118 - 11400 107516 15867 20804 22851 134178 77431 32010 104312 37984 16329 27751 89335 - 112927 222 350 14559 8 22861 114 2524 64364 104 15148 350 76466 166700 121942 780 8 91349 350 7393 74471 484 853 1617 2316 6602 8 - 13225 - 32949 - 220 32949 - 256 32949 - 271 32949 - 271 32949 198 271 32949 - 350 - 198 314 - 6 6837 - 13225 11 342 70653 0 3253 553 481 22861 223 1423 7522 18165 2178 34058 22369 16412 32999 16 867 8208 - 147475 - 18 - 2546 - 15517 - 15517 18 - 15517 2546 - 15517 15517 - 15517 15517 18 - 15517 15517 2546 - 15517 15517 15517 - 34 60213 53904 - 2960 3098 - 126470 25980 160432 16609 2775 4066 172261 19432 112927 222 350 14559 8 22861 114 2524 64364 104 15148 350 76466 166700 121942 780 8 91349 9552 99 247 4103 99 247 220 18 220 2546 220 15517 220 15517 18 220 15517 2546 220 15517 15517 220 15517 15517 18 220 15517 15517 2546 220 18 13 18 220 18 485 18 220 18 1008 18 44735 107516 15867 20804 22851 134178 77431 32010 104312 156437 1423 7522 18165 2178 34058 22369 16412 32999 16 867 8208 105024 106657 1967 53641 1235 185386 8118 22434 39336 26178 26178 168394 194663 27271 147475 25883 6961 9790 1339 461 83 1280 19016 1354 11 461 1099 481 3239 30 461 44 625 3239 17291 1520 480 11 461 35 481 1299 1236 17966 30 1416 6 27493 261 54602 43 diff --git a/models/ggml-vocab-llama-bpe.gguf.inp b/models/ggml-vocab-llama-bpe.gguf.inp index 9baf7d77ae..86b934e402 100644 --- a/models/ggml-vocab-llama-bpe.gguf.inp +++ b/models/ggml-vocab-llama-bpe.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-llama-bpe.gguf.out b/models/ggml-vocab-llama-bpe.gguf.out index 4b35cf93f7..a77376625a 100644 --- a/models/ggml-vocab-llama-bpe.gguf.out +++ b/models/ggml-vocab-llama-bpe.gguf.out @@ -1,5 +1,5 @@ 1142 220 19 220 27154 4038 - 37 51853 261 + 88075 16276 301 220 256 diff --git a/models/ggml-vocab-llama-spm.gguf.inp b/models/ggml-vocab-llama-spm.gguf.inp index 9baf7d77ae..86b934e402 100644 --- a/models/ggml-vocab-llama-spm.gguf.inp +++ b/models/ggml-vocab-llama-spm.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-llama-spm.gguf.out b/models/ggml-vocab-llama-spm.gguf.out index 93aacf8bae..2a71a6ef86 100644 --- a/models/ggml-vocab-llama-spm.gguf.out +++ b/models/ggml-vocab-llama-spm.gguf.out @@ -1,5 +1,5 @@ 474 287 29871 29946 29871 30226 7378 - 383 4000 261 + 11585 7810 295 259 1678 diff --git a/models/ggml-vocab-llama4.gguf.inp b/models/ggml-vocab-llama4.gguf.inp deleted file mode 100644 index 9baf7d77ae..0000000000 --- a/models/ggml-vocab-llama4.gguf.inp +++ /dev/null @@ -1,112 +0,0 @@ -ied 4 ½ months -__ggml_vocab_test__ -Führer -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - - -__ggml_vocab_test__ - - - -__ggml_vocab_test__ - - - - -__ggml_vocab_test__ - - -__ggml_vocab_test__ -Hello world 
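Back in the `gguf-py` writer changes earlier in this patch, `GGUFWriter.add_key_value` grew an optional `sub_type` so that array metadata carries an explicit element type instead of one inferred from `val[0]` in `_pack_val`. A minimal sketch of the new call shape — the output path, architecture name, and key are made-up placeholders, and the surrounding write calls assume the usual `GGUFWriter` flow:

```python
from gguf import GGUFWriter, GGUFValueType

writer = GGUFWriter("example.gguf", "llama")  # placeholder output path / arch

# explicit element type: _pack_val now uses sub_type instead of
# inferring the array element type from the first element
writer.add_key_value("example.labels", ["yes", "no"],
                     GGUFValueType.ARRAY, sub_type=GGUFValueType.STRING)

writer.write_header_to_file()
writer.write_kv_data_to_file()
writer.close()
```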
-__ggml_vocab_test__ - Hello world -__ggml_vocab_test__ -Hello World -__ggml_vocab_test__ - Hello World -__ggml_vocab_test__ - Hello World! -__ggml_vocab_test__ -Hello, world! -__ggml_vocab_test__ - Hello, world! -__ggml_vocab_test__ - this is 🦙.cpp -__ggml_vocab_test__ -w048 7tuijk dsdfhu -__ggml_vocab_test__ -нещо на Български -__ggml_vocab_test__ -កាន់តែពិសេសអាចខលចេញ -__ggml_vocab_test__ -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) -__ggml_vocab_test__ -Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello - Hello -__ggml_vocab_test__ - ( -__ggml_vocab_test__ - - = -__ggml_vocab_test__ -' era -__ggml_vocab_test__ -Hello, y'all! How are you 😁 ?我想在apple工作1314151天~ -__ggml_vocab_test__ -!!!!!! -__ggml_vocab_test__ -3 -__ggml_vocab_test__ -33 -__ggml_vocab_test__ -333 -__ggml_vocab_test__ -3333 -__ggml_vocab_test__ -33333 -__ggml_vocab_test__ -333333 -__ggml_vocab_test__ -3333333 -__ggml_vocab_test__ -33333333 -__ggml_vocab_test__ -333333333 -__ggml_vocab_test__ -Cửa Việt -__ggml_vocab_test__ - discards -__ggml_vocab_test__ - - - - - - - - - - - -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL -__ggml_vocab_test__ diff --git a/models/ggml-vocab-llama4.gguf.out b/models/ggml-vocab-llama4.gguf.out deleted file mode 100644 index 7ca46ce597..0000000000 --- a/models/ggml-vocab-llama4.gguf.out +++ /dev/null @@ -1,46 +0,0 @@ - 1190 220 32 220 18215 7112 - 50 16800 258 - - 220 - 256 - 277 - 197 - 198 - 368 - 2946 - 3271 - 19873 3817 - 39715 3817 - 19873 7353 - 39715 7353 - 39715 7353 13 - 19873 24 3817 13 - 39715 24 3817 13 - 544 373 9522 112 247 26 36315 - 99 39923 220 35 9607 21498 21470 3679 9433 - 1595 7653 633 79829 34051 1636 - 8755 102595 115960 21125 148305 96819 102816 39048 14105 22528 160234 - 114590 222 330 14879 21 51358 127 12817 93293 117 24204 330 68239 881 120327 170428 21 89101 330 7384 88230 511 947 1492 3742 7233 21 - 19873 - 39715 - 220 39715 - 256 39715 - 277 39715 - 277 39715 198 277 39715 - 330 - 198 319 - 19 7359 - 19873 24 386 87799 13 2403 583 650 51358 223 1663 155736 1522 42056 7544 13336 28785 29 4412 20645 - 17931 4959 - 31 - 1922 - 12325 - 12325 31 - 12325 1922 - 12325 12325 - 12325 12325 31 - 12325 12325 1922 - 12325 12325 12325 - 47 19811 12077 - 3260 3579 - 198 7283 51499 191231 20192 3271 3322 9287 2143 17860 114590 222 330 14879 21 51358 127 12817 93293 117 24204 330 68239 881 120327 170428 21 89101 9522 112 247 172394 247 220 31 220 1922 220 12325 220 12325 31 220 12325 1922 220 12325 12325 220 12325 12325 31 220 12325 12325 1922 220 31 26 31 220 31 396 31 220 31 1043 31 117131 102595 115960 21125 148305 96819 102816 80883 223 1663 155736 1522 42056 7544 13336 28785 29 4412 20645 79745 150278 117079 633 79829 34051 1636 25611 41990 109428 1488 91054 24072 17931 4959 29795 9296 16517 1806 481 96 1386 36633 1609 24 481 1109 650 5074 43 481 57 702 5074 27088 2170 536 24 481 48 650 1933 1696 30262 43 1665 19 32818 262 27236 56 diff --git a/models/ggml-vocab-mpt.gguf.inp b/models/ggml-vocab-mpt.gguf.inp index 9baf7d77ae..86b934e402 100644 --- a/models/ggml-vocab-mpt.gguf.inp +++ b/models/ggml-vocab-mpt.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ 
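The editor GUI and metadata scripts in this patch all lean on the same `GGUFReader` field API: `field.contents()` returns the decoded value and `field.types` is the type chain, with an array's element type recoverable as `field.types[-1]`. A short read-back sketch (the model path is a placeholder):

```python
from gguf import GGUFReader, GGUFValueType

reader = GGUFReader("models/ggml-vocab-gpt-2.gguf")  # placeholder path

field = reader.get_field("tokenizer.ggml.model")
if field is not None:
    print("tokenizer:", field.contents())

field = reader.get_field("tokenizer.ggml.tokens")
if field is not None and field.types[0] == GGUFValueType.ARRAY:
    # same recovery the scripts above use for array sub-types
    print("element type:", field.types[-1])
    print("first tokens:", field.contents()[:3])
```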
-Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-mpt.gguf.out b/models/ggml-vocab-mpt.gguf.out index 372c751bf7..ca62669ad0 100644 --- a/models/ggml-vocab-mpt.gguf.out +++ b/models/ggml-vocab-mpt.gguf.out @@ -1,5 +1,5 @@ 728 577 24142 2607 - 39 26288 6554 + 37515 18569 293 209 50276 diff --git a/models/ggml-vocab-nomic-bert-moe.gguf.inp b/models/ggml-vocab-nomic-bert-moe.gguf.inp deleted file mode 100644 index 9baf7d77ae..0000000000 --- a/models/ggml-vocab-nomic-bert-moe.gguf.inp +++ /dev/null @@ -1,112 +0,0 @@ -ied 4 ½ months -__ggml_vocab_test__ -Führer -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - - -__ggml_vocab_test__ - - - -__ggml_vocab_test__ - - - - -__ggml_vocab_test__ - - -__ggml_vocab_test__ -Hello world -__ggml_vocab_test__ - Hello world -__ggml_vocab_test__ -Hello World -__ggml_vocab_test__ - Hello World -__ggml_vocab_test__ - Hello World! -__ggml_vocab_test__ -Hello, world! -__ggml_vocab_test__ - Hello, world! -__ggml_vocab_test__ - this is 🦙.cpp -__ggml_vocab_test__ -w048 7tuijk dsdfhu -__ggml_vocab_test__ -нещо на Български -__ggml_vocab_test__ -កាន់តែពិសេសអាចខលចេញ -__ggml_vocab_test__ -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) -__ggml_vocab_test__ -Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello - Hello -__ggml_vocab_test__ - ( -__ggml_vocab_test__ - - = -__ggml_vocab_test__ -' era -__ggml_vocab_test__ -Hello, y'all! How are you 😁 ?我想在apple工作1314151天~ -__ggml_vocab_test__ -!!!!!! -__ggml_vocab_test__ -3 -__ggml_vocab_test__ -33 -__ggml_vocab_test__ -333 -__ggml_vocab_test__ -3333 -__ggml_vocab_test__ -33333 -__ggml_vocab_test__ -333333 -__ggml_vocab_test__ -3333333 -__ggml_vocab_test__ -33333333 -__ggml_vocab_test__ -333333333 -__ggml_vocab_test__ -Cửa Việt -__ggml_vocab_test__ - discards -__ggml_vocab_test__ - - - - - - - - - - - -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? 
We'Ve a'lL -__ggml_vocab_test__ diff --git a/models/ggml-vocab-nomic-bert-moe.gguf.out b/models/ggml-vocab-nomic-bert-moe.gguf.out deleted file mode 100644 index c31c61092c..0000000000 --- a/models/ggml-vocab-nomic-bert-moe.gguf.out +++ /dev/null @@ -1,46 +0,0 @@ - 17 297 201 78660 21775 - 72805 4097 56 - - - - - - - - - - 35378 8999 - 35378 8999 - 35378 6661 - 35378 6661 - 35378 6661 38 - 35378 4 8999 38 - 35378 4 8999 38 - 903 83 6 3 5 238 6366 - 148 7709 1019 361 458 134362 104 7 71 420 1132 - 14271 29 117152 - 6 149561 78270 48967 64254 7616 81705 - 6 247206 15 33176 16 6 247442 6 3 15755 15 144227 8705 18255 40292 158 4460 33 27686 16 6 142325 15 191 538 28 121505 450 1556 6863 10002 47 1098 16 - 35378 - 35378 - 35378 - 35378 - 35378 - 35378 35378 - 15 - 2203 - 242 1615 - 35378 4 113 25 5584 38 11249 621 398 6 201344 705 23638 213 9007 133 1879 2681 2592 135224 1906 6087 - 6 90827 - 138 - 3912 - 6 66000 - 138 66000 - 3912 66000 - 6 66000 66000 - 138 66000 66000 - 3912 66000 66000 - 6 66000 66000 66000 - 199152 3763 - 17116 99397 - 6 247206 15 33176 16 6 247442 6 3 15755 15 144227 8705 18255 40292 158 4460 33 27686 16 6 142325 6 3 138 3912 6 66000 138 66000 3912 66000 6 66000 66000 138 66000 66000 3912 66000 66000 80308 1031 5 363 138 27 363 6 149561 78270 48967 201344 705 23638 213 9007 133 1879 2681 2592 135224 1906 6087 6 110405 1369 69112 69112 69112 14271 29 117152 5106 4765 4765 1135 164721 164721 164721 58 58 58 58 2551 90827 32 85908 87 25 272 2809 242 18 18345 764 25 7 2685 4 242 11766 398 9077 32 242 594 959 9077 87 25 1181 3249 442 4 242 397 398 1884 3060 26156 32 1401 25 26455 10 25 141 866 diff --git a/models/ggml-vocab-phi-3.gguf.inp b/models/ggml-vocab-phi-3.gguf.inp index 9baf7d77ae..86b934e402 100644 --- a/models/ggml-vocab-phi-3.gguf.inp +++ b/models/ggml-vocab-phi-3.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-phi-3.gguf.out b/models/ggml-vocab-phi-3.gguf.out index 93aacf8bae..2a71a6ef86 100644 --- a/models/ggml-vocab-phi-3.gguf.out +++ b/models/ggml-vocab-phi-3.gguf.out @@ -1,5 +1,5 @@ 474 287 29871 29946 29871 30226 7378 - 383 4000 261 + 11585 7810 295 259 1678 diff --git a/models/ggml-vocab-pixtral.gguf.inp b/models/ggml-vocab-pixtral.gguf.inp deleted file mode 100644 index 9baf7d77ae..0000000000 --- a/models/ggml-vocab-pixtral.gguf.inp +++ /dev/null @@ -1,112 +0,0 @@ -ied 4 ½ months -__ggml_vocab_test__ -Führer -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - - -__ggml_vocab_test__ - - - -__ggml_vocab_test__ - - - - -__ggml_vocab_test__ - - -__ggml_vocab_test__ -Hello world -__ggml_vocab_test__ - Hello world -__ggml_vocab_test__ -Hello World -__ggml_vocab_test__ - Hello World -__ggml_vocab_test__ - Hello World! -__ggml_vocab_test__ -Hello, world! -__ggml_vocab_test__ - Hello, world! -__ggml_vocab_test__ - this is 🦙.cpp -__ggml_vocab_test__ -w048 7tuijk dsdfhu -__ggml_vocab_test__ -нещо на Български -__ggml_vocab_test__ -កាន់តែពិសេសអាចខលចេញ -__ggml_vocab_test__ -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) -__ggml_vocab_test__ -Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello - Hello -__ggml_vocab_test__ - ( -__ggml_vocab_test__ - - = -__ggml_vocab_test__ -' era -__ggml_vocab_test__ -Hello, y'all! 
How are you 😁 ?我想在apple工作1314151天~ -__ggml_vocab_test__ -!!!!!! -__ggml_vocab_test__ -3 -__ggml_vocab_test__ -33 -__ggml_vocab_test__ -333 -__ggml_vocab_test__ -3333 -__ggml_vocab_test__ -33333 -__ggml_vocab_test__ -333333 -__ggml_vocab_test__ -3333333 -__ggml_vocab_test__ -33333333 -__ggml_vocab_test__ -333333333 -__ggml_vocab_test__ -Cửa Việt -__ggml_vocab_test__ - discards -__ggml_vocab_test__ - - - - - - - - - - - -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL -__ggml_vocab_test__ diff --git a/models/ggml-vocab-pixtral.gguf.out b/models/ggml-vocab-pixtral.gguf.out deleted file mode 100644 index 53309d1bc9..0000000000 --- a/models/ggml-vocab-pixtral.gguf.out +++ /dev/null @@ -1,46 +0,0 @@ - 2014 1032 1052 1032 28504 6972 - 1070 7088 1258 - - 1032 - 1256 - 1293 - 1009 - 1010 - 1267 - 4688 - 1009 1010 - 22177 4304 - 45383 4304 - 22177 5325 - 45383 5325 - 45383 5325 1033 - 22177 1044 4304 1033 - 45383 1044 4304 1033 - 1593 1395 119685 1166 1153 1046 51228 - 1119 1048 1052 1056 1032 1055 17391 23216 30203 7785 17279 - 3337 30757 1902 4200 63073 3671 - 1225 1158 1128 1225 1158 1182 1225 1158 1147 1225 1159 1139 1225 1158 1143 1225 1159 1130 1225 1158 1150 1225 1158 1183 1225 1158 1159 1225 21359 1225 1158 1159 1225 1158 1162 1225 1158 1182 1225 1158 1133 1225 1158 1129 1225 1158 1155 1225 1158 1133 1225 21359 1225 1158 1137 - 1240 1159 1154 1128 1319 13052 1041 119685 1152 1182 29568 1240 1159 1140 1171 1239 1184 1143 1319 88181 1873 3659 1275 56421 1621 1041 126241 1133 1319 11234 1873 26303 1455 1934 2246 3754 10835 1041 - 22177 - 45383 - 1032 45383 - 1256 45383 - 1293 45383 - 1293 45383 1010 1293 45383 - 1319 - 1010 1376 - 1039 4033 - 22177 1044 1404 48054 1033 3075 1584 1636 119685 1152 1129 3082 26060 2998 63614 82278 1049 1051 1049 1052 1049 1053 1049 6434 6749 - 7290 7290 7290 - 1051 - 1051 1051 - 1051 1051 1051 - 1051 1051 1051 1051 - 1051 1051 1051 1051 1051 - 1051 1051 1051 1051 1051 1051 - 1051 1051 1051 1051 1051 1051 1051 - 1051 1051 1051 1051 1051 1051 1051 1051 - 1051 1051 1051 1051 1051 1051 1051 1051 1051 - 1067 59503 28783 - 3724 4058 - 1010 1032 1267 1032 4688 1032 17152 1458 29356 1010 1256 1010 1293 1010 1260 1010 1652 1010 1240 1159 1154 1128 1319 13052 1041 119685 1152 1182 29568 1240 1159 1140 1171 1239 1184 1143 1319 88181 1873 3659 1275 56421 1621 1041 126241 1133 119685 1166 1153 1240 1159 1166 1153 1032 1051 1032 1051 1051 1032 1051 1051 1051 1032 1051 1051 1051 1051 1032 1051 1051 1051 1051 1051 1032 1051 1051 1051 1051 1051 1051 1032 1051 1051 1051 1051 1051 1051 1051 1032 1051 1051 1051 1051 1051 1051 1051 1051 1032 1051 1046 1051 1032 1051 1791 1051 1032 1051 2880 1051 71881 1158 1128 1225 1158 1182 1225 1158 1147 1225 1159 1139 1225 1158 1143 1225 1159 1130 1225 1158 1150 1225 1158 1183 1225 1158 1159 1225 21359 1225 1158 1159 1225 1158 1162 1225 1158 1182 1225 1158 1133 1240 1159 1152 1129 3082 26060 2998 63614 82278 1049 1051 1049 1052 1049 1053 1049 6434 6749 45577 1045 6626 43555 2843 30757 1902 4200 63073 3671 14931 20040 20040 1657 1657 1975 14135 14135 83923 7290 7290 7290 45509 45509 45509 1362 6483 2151 1576 1116 2189 1514 1681 2156 1044 1576 3609 1636 5257 1063 1576 1077 1605 5257 1362 7534 3180 1494 1044 1576 1068 1636 2479 2269 26883 1063 2837 1039 45654 1261 54297 1076 diff --git 
a/models/ggml-vocab-qwen2.gguf.inp b/models/ggml-vocab-qwen2.gguf.inp index 9baf7d77ae..86b934e402 100644 --- a/models/ggml-vocab-qwen2.gguf.inp +++ b/models/ggml-vocab-qwen2.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-qwen2.gguf.out b/models/ggml-vocab-qwen2.gguf.out index 18b4b45cd1..595d59a449 100644 --- a/models/ggml-vocab-qwen2.gguf.out +++ b/models/ggml-vocab-qwen2.gguf.out @@ -1,5 +1,5 @@ 1122 220 19 220 26062 3951 - 37 50753 261 + 86975 15897 301 220 256 diff --git a/models/ggml-vocab-refact.gguf.inp b/models/ggml-vocab-refact.gguf.inp index 9baf7d77ae..86b934e402 100644 --- a/models/ggml-vocab-refact.gguf.inp +++ b/models/ggml-vocab-refact.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-refact.gguf.out b/models/ggml-vocab-refact.gguf.out index 63d8305c3f..f13dda52ce 100644 --- a/models/ggml-vocab-refact.gguf.out +++ b/models/ggml-vocab-refact.gguf.out @@ -1,5 +1,5 @@ 4833 225 38 225 143 140 17723 - 56 2006 3935 265 + 144 231 7132 342 225 261 diff --git a/models/ggml-vocab-roberta-bpe.gguf.inp b/models/ggml-vocab-roberta-bpe.gguf.inp deleted file mode 100644 index 9baf7d77ae..0000000000 --- a/models/ggml-vocab-roberta-bpe.gguf.inp +++ /dev/null @@ -1,112 +0,0 @@ -ied 4 ½ months -__ggml_vocab_test__ -Führer -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - -__ggml_vocab_test__ - - -__ggml_vocab_test__ - - - -__ggml_vocab_test__ - - - - -__ggml_vocab_test__ - - -__ggml_vocab_test__ -Hello world -__ggml_vocab_test__ - Hello world -__ggml_vocab_test__ -Hello World -__ggml_vocab_test__ - Hello World -__ggml_vocab_test__ - Hello World! -__ggml_vocab_test__ -Hello, world! -__ggml_vocab_test__ - Hello, world! -__ggml_vocab_test__ - this is 🦙.cpp -__ggml_vocab_test__ -w048 7tuijk dsdfhu -__ggml_vocab_test__ -нещо на Български -__ggml_vocab_test__ -កាន់តែពិសេសអាចខលចេញ -__ggml_vocab_test__ -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) -__ggml_vocab_test__ -Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello -__ggml_vocab_test__ - Hello - Hello -__ggml_vocab_test__ - ( -__ggml_vocab_test__ - - = -__ggml_vocab_test__ -' era -__ggml_vocab_test__ -Hello, y'all! How are you 😁 ?我想在apple工作1314151天~ -__ggml_vocab_test__ -!!!!!! -__ggml_vocab_test__ -3 -__ggml_vocab_test__ -33 -__ggml_vocab_test__ -333 -__ggml_vocab_test__ -3333 -__ggml_vocab_test__ -33333 -__ggml_vocab_test__ -333333 -__ggml_vocab_test__ -3333333 -__ggml_vocab_test__ -33333333 -__ggml_vocab_test__ -333333333 -__ggml_vocab_test__ -Cửa Việt -__ggml_vocab_test__ - discards -__ggml_vocab_test__ - - - - - - - - - - - -🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? 
We'Ve a'lL -__ggml_vocab_test__ diff --git a/models/ggml-vocab-roberta-bpe.gguf.out b/models/ggml-vocab-roberta-bpe.gguf.out deleted file mode 100644 index f181ac3dcc..0000000000 --- a/models/ggml-vocab-roberta-bpe.gguf.out +++ /dev/null @@ -1,46 +0,0 @@ - 2550 204 18430 377 - 597 2768 298 8564 - - 1437 - 1437 1437 - 1437 1437 1437 - 50117 - 50118 - 50140 - 50140 50118 - 50117 50118 - 31414 232 - 20920 232 - 31414 623 - 20920 623 - 20920 623 328 - 31414 6 232 328 - 20920 6 232 328 - 42 16 8103 18164 27 4 49317 - 605 40976 262 10109 18474 385 29 36807 6455 - 36765 25482 22063 23171 34251 18697 10809 26161 18697 3602 22063 27969 40966 25417 15264 26161 24269 36709 41171 35328 - 1376 17772 7471 1376 17772 19002 1376 17772 9085 1376 4333 13859 1376 17772 9357 1376 4333 9264 1376 17772 25448 1376 17772 18400 1376 17772 4333 1376 4333 10172 1376 17772 4333 1376 17772 7258 1376 17772 19002 1376 17772 5782 1376 17772 10172 1376 17772 3726 1376 17772 5782 1376 4333 10172 1376 17772 23171 - 6569 15113 7471 36 21113 43 17841 19002 17 8384 6569 14285 4958 12605 36 34654 2841 4203 354 10146 26511 1070 43 36174 5782 36 8338 21554 14 34 63 308 19233 43 - 31414 - 20920 - 1437 20920 - 1437 1437 20920 - 1437 1437 1437 20920 - 1437 1437 1437 20920 50118 1437 1437 1437 20920 - 36 - 50118 5457 - 108 3567 - 31414 6 1423 108 1250 328 1336 32 47 17841 10172 17487 47876 3602 48617 15264 46537 11423 27326 48494 8210 49233 1558 1570 27761 49429 43251 10809 17772 - 32376 12846 - 246 - 3103 - 25631 - 46152 - 3103 25631 - 46152 3103 - 46152 25631 - 46152 46152 - 46152 3103 25631 - 347 1376 2023 12410 102 16376 1376 2023 6382 90 - 9553 5954 - 50118 1437 50140 1437 50140 50118 1437 50117 1437 50117 50117 1437 50117 50118 1437 1437 50118 1437 1437 1437 50118 1437 1437 1437 1437 50118 1437 1437 1437 1437 1437 50118 6569 15113 7471 36 21113 43 17841 19002 17 8384 6569 14285 4958 12605 36 34654 2841 4203 354 10146 26511 1070 43 36174 5782 8103 18164 27 6569 18164 27 155 2357 30242 155 25631 30242 3103 30242 25631 30242 46152 30242 3103 25631 155 4 246 155 7586 246 155 734 246 25974 17772 7471 1376 17772 19002 1376 17772 9085 1376 4333 13859 1376 17772 9357 1376 4333 9264 1376 17772 25448 1376 17772 18400 1376 17772 4333 1376 4333 10172 1376 17772 4333 1376 17772 7258 1376 17772 19002 1376 17772 5782 18636 10172 17487 47876 3602 48617 15264 46537 11423 27326 48494 8210 49233 1558 1570 27761 49429 43251 10809 17772 36738 48332 47463 18697 10809 25482 22063 23171 34251 18697 10809 26161 18697 3602 22063 27969 40966 25417 15264 26161 24269 36709 41171 35328 128 49690 108 49972 49519 12905 48149 48149 43796 32376 12846 27282 28749 38 348 57 128 41042 37 18 89 6 128 4629 47 686 116 128 448 45 686 38 581 146 24 6 128 495 47 101 103 6845 116 166 108 30660 10 108 462 574 diff --git a/models/ggml-vocab-starcoder.gguf.inp b/models/ggml-vocab-starcoder.gguf.inp index 9baf7d77ae..86b934e402 100644 --- a/models/ggml-vocab-starcoder.gguf.inp +++ b/models/ggml-vocab-starcoder.gguf.inp @@ -1,6 +1,6 @@ ied 4 ½ months __ggml_vocab_test__ -Führer +Äpfel __ggml_vocab_test__ __ggml_vocab_test__ diff --git a/models/ggml-vocab-starcoder.gguf.out b/models/ggml-vocab-starcoder.gguf.out index 87e2465d36..4698e2c3c8 100644 --- a/models/ggml-vocab-starcoder.gguf.out +++ b/models/ggml-vocab-starcoder.gguf.out @@ -1,5 +1,5 @@ 4850 244 57 244 162 159 17722 - 75 2022 3943 284 + 163 250 7146 361 244 280 diff --git a/requirements/requirements-gguf_editor_gui.txt b/requirements/requirements-gguf_editor_gui.txt index 920dc7cf90..fd253364e1 100644 --- 
a/requirements/requirements-gguf_editor_gui.txt +++ b/requirements/requirements-gguf_editor_gui.txt @@ -1,3 +1,3 @@ numpy~=1.26.4 PySide6~=6.9.0 -gguf>=0.16.0 +gguf>=0.17.0 diff --git a/scripts/sync_vendor.py b/scripts/sync_vendor.py new file mode 100755 index 0000000000..1151c9f019 --- /dev/null +++ b/scripts/sync_vendor.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +import urllib.request + +vendor = { + "https://github.com/nlohmann/json/releases/latest/download/json.hpp": "vendor/nlohmann/json.hpp", + "https://github.com/nlohmann/json/releases/latest/download/json_fwd.hpp": "vendor/nlohmann/json_fwd.hpp", + + # sync manually + # "https://raw.githubusercontent.com/ochafik/minja/refs/heads/main/include/minja/minja.hpp": "vendor/minja/minja.hpp", + # "https://raw.githubusercontent.com/ochafik/minja/refs/heads/main/include/minja/chat-template.hpp": "vendor/minja/chat-template.hpp", + + "https://raw.githubusercontent.com/nothings/stb/refs/heads/master/stb_image.h": "vendor/stb/stb_image.h", + + "https://github.com/mackron/miniaudio/raw/refs/tags/0.11.22/miniaudio.h": "vendor/miniaudio/miniaudio.h", + + "https://raw.githubusercontent.com/yhirose/cpp-httplib/refs/tags/v0.20.1/httplib.h": "vendor/cpp-httplib/httplib.h", +} + +for url, filename in vendor.items(): + print(f"downloading {url} to {filename}") # noqa: NP100 + urllib.request.urlretrieve(url, filename) diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index abf436adac..c0590e105c 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -174,6 +174,8 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = { { LLM_KV_CONVNEXT_EMBEDDING_LENGTH, "%s.convnext.embedding_length" }, { LLM_KV_CONVNEXT_BLOCK_COUNT, "%s.convnext.block_count" }, + { LLM_KV_CLASSIFIER_OUTPUT_LABELS, "%s.classifier.output_labels" }, + { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" }, { LLM_KV_TOKENIZER_PRE, "tokenizer.ggml.pre" }, { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" }, @@ -448,6 +450,7 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_NAMES { LLM_TENSOR_TOKEN_TYPES, "token_types" }, { LLM_TENSOR_POS_EMBD, "position_embd" }, { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, diff --git a/src/llama-arch.h b/src/llama-arch.h index 41a023da3d..930cb4eca3 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -213,6 +213,8 @@ enum llm_kv { LLM_KV_CONVNEXT_EMBEDDING_LENGTH, LLM_KV_CONVNEXT_BLOCK_COUNT, + LLM_KV_CLASSIFIER_OUTPUT_LABELS, + // deprecated: LLM_KV_TOKENIZER_PREFIX_ID, LLM_KV_TOKENIZER_SUFFIX_ID, diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp index b98e3256c3..6a19a24311 100644 --- a/src/llama-batch.cpp +++ b/src/llama-batch.cpp @@ -15,24 +15,31 @@ llama_ubatch llama_sbatch::reserve_ubatch(size_t n_ubatch, bool has_embd) { break; } } - ubatch_token.resize(!has_embd ? n_ubatch : 0); - ubatch_embd.resize(has_embd ? n_embd * n_ubatch : 0); - ubatch_pos.resize(n_ubatch); - ubatch_n_seq_id.resize(n_ubatch); - ubatch_seq_id.resize(n_ubatch); - ubatch_output.resize(n_ubatch); + + udatas.push_back({}); + + auto & udata = udatas.back(); + + udata.token.resize(!has_embd ? n_ubatch : 0); + udata.embd.resize(has_embd ? n_embd * n_ubatch : 0); + udata.pos.resize(n_ubatch); + udata.n_seq_id.resize(n_ubatch); + udata.seq_id.resize(n_ubatch); + udata.output.resize(n_ubatch); + llama_ubatch ubatch = { /*equal_seqs =*/ true, /*n_tokens =*/ 0, /*n_seq_tokens =*/ 0, /*n_seqs =*/ 0, - /*token =*/ !has_embd ?
ubatch_token.data() : nullptr, - /*embd =*/ has_embd ? ubatch_embd.data() : nullptr, - /*pos =*/ ubatch_pos.data(), - /*n_seq_id =*/ ubatch_n_seq_id.data(), - /*seq_id =*/ ubatch_seq_id.data(), - /*output =*/ ubatch_output.data(), + /*token =*/ !has_embd ? udata.token.data() : nullptr, + /*embd =*/ has_embd ? udata.embd.data() : nullptr, + /*pos =*/ udata.pos.data(), + /*n_seq_id =*/ udata.n_seq_id.data(), + /*seq_id =*/ udata.seq_id.data(), + /*output =*/ udata.output.data(), }; + return ubatch; } diff --git a/src/llama-batch.h b/src/llama-batch.h index 6305051b62..b8260b94fd 100644 --- a/src/llama-batch.h +++ b/src/llama-batch.h @@ -11,15 +11,15 @@ struct llama_ubatch { bool equal_seqs; // TODO: whole_seqs for embeddings? - uint32_t n_tokens; // total tokens (n_seq_tokens * n_seqs) + uint32_t n_tokens; // total tokens (n_seq_tokens * n_seqs) uint32_t n_seq_tokens; // tokens per sequence uint32_t n_seqs; llama_token * token; // [n_tokens] float * embd; // [n_embd, n_tokens] llama_pos * pos; // [n_tokens] - int32_t * n_seq_id; // [n_seqs] - llama_seq_id ** seq_id; // [n_seqs] + int32_t * n_seq_id; // [n_seqs] // TODO: remove, should belong to only 1 sequence + llama_seq_id ** seq_id; // [n_seqs] // TODO: become llama_seq_id * seq_id; int8_t * output; // [n_tokens] }; @@ -49,13 +49,18 @@ struct llama_sbatch { const llama_batch * batch = nullptr; - // buffers for the ubatch - std::vector<llama_token> ubatch_token; - std::vector<float> ubatch_embd; - std::vector<llama_pos> ubatch_pos; - std::vector<int32_t> ubatch_n_seq_id; - std::vector<llama_seq_id *> ubatch_seq_id; - std::vector<int8_t> ubatch_output; + // buffers for the ubatches + // TODO: very hacky, this needs a complete rework + struct ubatch_data { + std::vector<llama_token> token; + std::vector<float> embd; + std::vector<llama_pos> pos; + std::vector<int32_t> n_seq_id; + std::vector<llama_seq_id *> seq_id; + std::vector<int8_t> output; + }; + + std::vector<ubatch_data> udatas; llama_ubatch reserve_ubatch(size_t n_ubatch, bool has_embd = false); diff --git a/src/llama-context.cpp b/src/llama-context.cpp index e153351af3..4ab5743879 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -6,9 +6,10 @@ #include "llama-model.h" #include "llama-kv-cache.h" -#include <cstring> -#include <stdexcept> #include <cinttypes> +#include <cstring> +#include <limits> +#include <stdexcept> // // llama_context @@ -122,6 +123,11 @@ llama_context::llama_context( __func__, n_ctx_per_seq, hparams.n_ctx_train); } + if (!params.swa_full && cparams.n_seq_max > 1) { + LLAMA_LOG_WARN("%s: requested n_seq_max (%u) > 1, but swa_full is not enabled -- performance may be degraded: %s\n", + __func__, cparams.n_seq_max, "https://github.com/ggml-org/llama.cpp/pull/13845#issuecomment-2924800573"); + } + if (!hparams.vocab_only) { // GPU backends for (auto * dev : model.devices) { @@ -259,15 +265,9 @@ llama_context::llama_context( // reserve worst-case graph if (!hparams.vocab_only && memory) { - const uint32_t n_seqs = 1; // TODO: worst-case number of sequences + const uint32_t n_seqs = cparams.n_seq_max; const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch); - llama_token token = model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph - - // restore later - // TODO: something cleaner - const auto n_outputs_save = n_outputs; - LLAMA_LOG_DEBUG("%s: worst-case: n_tokens = %d, n_seqs = %d, n_outputs = %d\n", __func__, n_tokens, n_seqs, n_outputs); int n_splits_pp = -1; @@ -279,23 +279,17 @@ llama_context::llama_context( // simulate full KV cache llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get()); - kv_self->set_full(); + const auto kv_state = kv_self->init_full(); + if
(!kv_state) { + throw std::runtime_error("failed to initialize KV cache"); + } cross.v_embd.clear(); // reserve pp graph first so that buffers are only allocated once { - llama_ubatch ubatch_pp = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; - - // max number of outputs - n_outputs = ubatch_pp.n_tokens; - - LLAMA_LOG_DEBUG("%s: reserving graph for n_tokens = %d, n_seqs = %d\n", __func__, ubatch_pp.n_tokens, ubatch_pp.n_seqs); - - auto * gf = graph_init(); - graph_build(ctx_compute.get(), gf, ubatch_pp, LLM_GRAPH_TYPE_DEFAULT); - - if (!ggml_backend_sched_reserve(sched.get(), gf)) { + auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, kv_state.get()); + if (!gf) { throw std::runtime_error("failed to allocate compute pp buffers"); } @@ -305,16 +299,8 @@ llama_context::llama_context( // reserve with tg graph to get the number of splits and nodes { - llama_ubatch ubatch_tg = { true, 1, 1, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; - - n_outputs = ubatch_tg.n_tokens; - - LLAMA_LOG_DEBUG("%s: reserving graph for n_tokens = %d, n_seqs = %d\n", __func__, ubatch_tg.n_tokens, ubatch_tg.n_seqs); - - auto * gf = graph_init(); - graph_build(ctx_compute.get(), gf, ubatch_tg, LLM_GRAPH_TYPE_DEFAULT); - - if (!ggml_backend_sched_reserve(sched.get(), gf)) { + auto * gf = graph_reserve(1, 1, 1, kv_state.get()); + if (!gf) { throw std::runtime_error("failed to allocate compute tg buffers"); } @@ -324,22 +310,12 @@ llama_context::llama_context( // reserve again with pp graph to avoid ggml-alloc reallocations during inference { - llama_ubatch ubatch_pp = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; - - n_outputs = ubatch_pp.n_tokens; - - LLAMA_LOG_DEBUG("%s: reserving graph for n_tokens = %d, n_seqs = %d\n", __func__, ubatch_pp.n_tokens, ubatch_pp.n_seqs); - - auto * gf = graph_init(); - graph_build(ctx_compute.get(), gf, ubatch_pp, LLM_GRAPH_TYPE_DEFAULT); - - if (!ggml_backend_sched_reserve(sched.get(), gf)) { + auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, kv_state.get()); + if (!gf) { throw std::runtime_error("failed to allocate compute pp buffers"); } } - n_outputs = n_outputs_save; - for (size_t i = 0; i < backend_ptrs.size(); ++i) { ggml_backend_t backend = backend_ptrs[i]; ggml_backend_buffer_type_t buft = backend_buft[i]; @@ -453,36 +429,33 @@ const llama_kv_cache * llama_context::get_kv_self() const { return kv_self; } -void llama_context::kv_self_update() { - bool need_reserve = false; +bool llama_context::kv_self_update() { + if (!memory) { + return false; + } llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get()); - need_reserve = kv_self->update(*this); - - // reserve a worst case graph if needed - if (need_reserve) { - LLAMA_LOG_DEBUG("%s: reserving a worst case graph\n", __func__); - - // build worst-case graph - uint32_t n_seqs = 1; // TODO: worst-case number of sequences - uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch); - - // simulate full KV cache - kv_self->set_full(); - - llama_token token = model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph - llama_ubatch ubatch = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; - - auto * gf = graph_init(); - graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT); - - // initialize scheduler with the worst-case graph - ggml_backend_sched_reset(sched.get()); - if
(!ggml_backend_sched_reserve(sched.get(), gf)) { - LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__); - } + if (!kv_self->update(*this)) { + // no updates have been performed + return false; } + + // if the KV cache did any computation, we have to reserve a new worst-case graph + const auto kv_state = kv_self->init_full(); + if (!kv_state) { + throw std::runtime_error("failed to initialize KV cache"); + } + + const uint32_t n_seqs = cparams.n_seq_max; + const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch); + + auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, kv_state.get()); + if (!gf) { + LLAMA_LOG_ERROR("%s: failed to reserve graph after the KV cache update\n", __func__); + } + + return true; } enum llama_pooling_type llama_context::pooling_type() const { @@ -676,6 +649,49 @@ bool llama_context::apply_adapter_cvec( return cvec.apply(model, data, len, n_embd, il_start, il_end); } +llm_graph_result_ptr llama_context::process_ubatch(const llama_ubatch & ubatch, llm_graph_type gtype, llama_memory_state_i * mstate, ggml_status & ret) { + if (mstate && !mstate->apply()) { + LLAMA_LOG_ERROR("%s: failed to apply memory state\n", __func__); + ret = GGML_STATUS_FAILED; + return nullptr; + } + + auto * gf = graph_init(); + if (!gf) { + LLAMA_LOG_ERROR("%s: failed to initialize graph\n", __func__); + ret = GGML_STATUS_FAILED; + return nullptr; + } + + auto res = graph_build(ctx_compute.get(), gf, ubatch, gtype, mstate); + if (!res) { + LLAMA_LOG_ERROR("%s: failed to build graph\n", __func__); + ret = GGML_STATUS_FAILED; + return nullptr; + } + + // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs); + + if (!ggml_backend_sched_alloc_graph(sched.get(), gf)) { + LLAMA_LOG_ERROR("%s: failed to allocate graph\n", __func__); + ret = GGML_STATUS_ALLOC_FAILED; + return nullptr; + } + + res->set_inputs(&ubatch); + + const auto status = graph_compute(gf, ubatch.n_tokens > 1); + if (status != GGML_STATUS_SUCCESS) { + LLAMA_LOG_ERROR("%s: failed to compute graph, compute status: %d\n", __func__, status); + ret = status; + return nullptr; + } + + ret = GGML_STATUS_SUCCESS; + + return res; +} + int llama_context::encode(llama_batch & inp_batch) { if (inp_batch.n_tokens == 0) { LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__); @@ -737,8 +753,6 @@ int llama_context::encode(llama_batch & inp_batch) { n_outputs = n_tokens; - //batch_manager->prepare(ubatch); - ggml_backend_sched_reset(sched.get()); ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data); @@ -749,26 +763,18 @@ int llama_context::encode(llama_batch & inp_batch) { // ref: https://github.com/ggml-org/llama.cpp/pull/12181#issuecomment-2730451223 cparams.causal_attn = false; - auto * gf = graph_init(); - auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_ENCODER); - - ggml_backend_sched_alloc_graph(sched.get(), gf); - - res->set_inputs(&ubatch); + ggml_status status; + const auto res = process_ubatch(ubatch, LLM_GRAPH_TYPE_ENCODER, nullptr, status); cparams.causal_attn = causal_attn_org; - const auto compute_status = graph_compute(gf, n_tokens > 1); - switch (compute_status) { - case GGML_STATUS_SUCCESS: - break; - case GGML_STATUS_ABORTED: - return 2; - case GGML_STATUS_ALLOC_FAILED: - return -2; - case GGML_STATUS_FAILED: - default: - return -3; + if (!res) { + switch (status) { + case GGML_STATUS_ABORTED: return 2; + case GGML_STATUS_ALLOC_FAILED: return -2; + case 
GGML_STATUS_FAILED: return -3; + case GGML_STATUS_SUCCESS: GGML_ABORT("should not happen"); + } } auto * t_embd = res->get_embd_pooled() ? res->get_embd_pooled() : res->get_embd(); @@ -889,8 +895,6 @@ int llama_context::decode(llama_batch & inp_batch) { const int64_t n_tokens_all = batch.n_tokens; const int64_t n_embd = hparams.n_embd; - llama_kv_cache_guard kv_guard(kv_self); GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT // TODO: move the validation to the llama_batch_allocr @@ -936,7 +940,48 @@ int llama_context::decode(llama_batch & inp_batch) { n_outputs_all = 1; } - llama_sbatch sbatch = kv_self->sbatch_init(batch, /* logits_all */ n_outputs_all == n_tokens_all); + // handle any pending defrags/shifts + kv_self_update(); + + llama_memory_state_ptr kv_state; + + bool did_defrag = false; + + while (true) { + kv_state = kv_self->init_batch(batch, cparams.n_ubatch, embd_pooled, /* logits_all */ n_outputs_all == n_tokens_all); + if (!kv_state) { + return -2; + } + + switch (kv_state->get_status()) { + case LLAMA_MEMORY_STATUS_SUCCESS: + { + } break; + case LLAMA_MEMORY_STATUS_FAILED_PREPARE: + { + if (!did_defrag) { + did_defrag = true; + + kv_self->defrag_sched(-1.0f); + if (kv_self_update()) { + LLAMA_LOG_DEBUG("%s: failed to init batch of size %d, retrying after defrag\n", __func__, batch.n_tokens); + + continue; + } + } + + LLAMA_LOG_WARN("%s: failed to find KV cache slot for batch of size %d\n", __func__, batch.n_tokens); + + return 1; + } + case LLAMA_MEMORY_STATUS_FAILED_COMPUTE: + { + return -2; + } + } + + break; + } // reserve output buffer if (output_reserve(n_outputs_all) < n_outputs_all) { @@ -944,13 +989,10 @@ int llama_context::decode(llama_batch & inp_batch) { return -2; }; - // handle any pending defrags/shifts - kv_self_update(); - int64_t n_outputs_prev = 0; - while (sbatch.n_tokens > 0) { - llama_ubatch ubatch = kv_self->ubatch_next(sbatch, cparams.n_ubatch, embd_pooled); + do { + const auto & ubatch = kv_state->get_ubatch(); // count the outputs in this u_batch { @@ -969,33 +1011,37 @@ int llama_context::decode(llama_batch & inp_batch) { n_outputs = n_outputs_new; } - // find KV slot - if (!kv_self->find_slot(ubatch)) { - return 1; - } - ggml_backend_sched_reset(sched.get()); ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data); - auto * gf = graph_init(); - auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DECODER); + ggml_status status; + const auto res = process_ubatch(ubatch, LLM_GRAPH_TYPE_DECODER, kv_state.get(), status); - // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs); + if (!res) { + // the last ubatch failed or was aborted -> remove all positions of that ubatch from the KV cache + llama_pos pos_min[LLAMA_MAX_PARALLEL_SEQUENCES] = { std::numeric_limits<llama_pos>::max() }; - ggml_backend_sched_alloc_graph(sched.get(), gf); + for (uint32_t i = 0; i < ubatch.n_tokens; ++i) { + const auto & seq_id = ubatch.seq_id[i][0]; - res->set_inputs(&ubatch); + pos_min[seq_id] = std::min(pos_min[seq_id], ubatch.pos[i]); + } - const auto compute_status = graph_compute(gf, ubatch.n_tokens > 1); - if (compute_status != GGML_STATUS_SUCCESS) { - switch (compute_status) { - case GGML_STATUS_ABORTED: - return 2; - case GGML_STATUS_ALLOC_FAILED: - return -2; - case GGML_STATUS_FAILED: - default: - return -3; + for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + if (pos_min[s] ==
std::numeric_limits<llama_pos>::max()) { + continue; + } + + LLAMA_LOG_WARN("%s: removing KV cache entries for seq_id = %d, pos = [%d, +inf)\n", __func__, s, pos_min[s]); + + llama_kv_self_seq_rm(this, s, pos_min[s], -1); + } + + switch (status) { + case GGML_STATUS_ABORTED: return 2; + case GGML_STATUS_ALLOC_FAILED: return -2; + case GGML_STATUS_FAILED: return -3; + case GGML_STATUS_SUCCESS: GGML_ABORT("should not happen"); } } @@ -1082,10 +1128,7 @@ int llama_context::decode(llama_batch & inp_batch) { } n_outputs_prev += n_outputs; - } - - // finalize the batch processing - kv_guard.commit(); + } while (kv_state->next()); // set to total number of outputs in the batch, for use in llama_get_logits_ith n_outputs = n_outputs_all; @@ -1094,7 +1137,7 @@ int llama_context::decode(llama_batch & inp_batch) { { bool sorted_output = true; - auto & out_ids = sbatch.out_ids; + auto & out_ids = kv_state->out_ids(); GGML_ASSERT(out_ids.size() == (size_t) n_outputs_all); @@ -1254,11 +1297,52 @@ ggml_cgraph * llama_context::graph_init() { return ggml_new_graph_custom(ctx_compute.get(), graph_max_nodes(), false); } +ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_state_i * mstate) { + LLAMA_LOG_DEBUG("%s: reserving a graph for ubatch with n_tokens = %4u, n_seqs = %2u, n_outputs = %4u\n", __func__, n_tokens, n_seqs, n_outputs); + + if (n_tokens % n_seqs != 0) { + n_tokens = (n_tokens / n_seqs) * n_seqs; + n_outputs = std::min(n_outputs, n_tokens); + + LLAMA_LOG_DEBUG("%s: making n_tokens a multiple of n_seqs - n_tokens = %u, n_seqs = %u, n_outputs = %u\n", __func__, n_tokens, n_seqs, n_outputs); + } + + // store the n_outputs as it is, and restore it afterwards + // TODO: not sure if needed, might simplify in the future by removing this + const auto save_n_outputs = this->n_outputs; + + this->n_outputs = n_outputs; + + llama_token token = model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph + llama_ubatch ubatch = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; + + auto * gf = graph_init(); + auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT, mstate); + + this->n_outputs = save_n_outputs; + + if (!res) { + LLAMA_LOG_ERROR("%s: failed to build worst-case graph\n", __func__); + return nullptr; + } + + ggml_backend_sched_reset(sched.get()); + + // initialize scheduler with the specified graph + if (!ggml_backend_sched_reserve(sched.get(), gf)) { + LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__); + return nullptr; + } + + return gf; +} + llm_graph_result_ptr llama_context::graph_build( - ggml_context * ctx, - ggml_cgraph * gf, - const llama_ubatch & ubatch, - llm_graph_type gtype) { + ggml_context * ctx, + ggml_cgraph * gf, + const llama_ubatch & ubatch, + llm_graph_type gtype, + const llama_memory_state_i * mstate) { return model.build_graph( { /*.ctx =*/ ctx, @@ -1270,7 +1354,7 @@ llm_graph_result_ptr llama_context::graph_build( /*.backend_cpu =*/ backend_cpu, /*.cvec =*/ &cvec, /*.loras =*/ &loras, - /*.memory =*/ memory.get(), + /*.mstate =*/ mstate, /*.cross =*/ &cross, /*.n_outputs =*/ n_outputs, /*.cb =*/ graph_get_cb(), @@ -1951,7 +2035,6 @@ void llama_context::opt_epoch_iter( llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get()); kv_self->clear(); - llama_kv_cache_guard kv_guard(kv_self); for (uint32_t pos_ctx = 0; pos_ctx < n_ctx; pos_ctx += n_batch) { batch.n_tokens
= n_batch; @@ -1974,7 +2057,11 @@ void llama_context::opt_epoch_iter( int64_t n_outputs_all = n_tokens_all; - llama_sbatch sbatch = kv_self->sbatch_init(batch, /*logits_all =*/ true); + auto kv_state = kv_self->init_batch(batch, cparams.n_ubatch, embd_pooled, /* logits_all */ true); + if (!kv_state || kv_state->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) { + LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__); + break; + } // reserve output buffer if (output_reserve(n_outputs_all) < n_outputs_all) { @@ -1982,20 +2069,19 @@ void llama_context::opt_epoch_iter( GGML_ABORT("TODO: handle this error"); }; - for (uint32_t pos_batch = 0; pos_batch < n_batch; pos_batch += n_ubatch) { - llama_ubatch ubatch = kv_self->ubatch_next(sbatch, cparams.n_ubatch, embd_pooled); + uint32_t pos_batch = 0; + do { + const auto & ubatch = kv_state->get_ubatch(); n_outputs = ubatch.n_tokens; - // TODO: not sure if this is needed - if (!kv_self->find_slot(ubatch)) { - LLAMA_LOG_WARN("%s: failed to find KV cache slot for ubatch of size %d\n", __func__, ubatch.n_tokens); - - GGML_ABORT("TODO: handle this error"); + if (!kv_state->apply()) { + LLAMA_LOG_ERROR("%s: failed to update the memory state\n", __func__); + break; } auto * gf = graph_init(); - auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT); + auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT, kv_state.get()); struct ggml_context * ctx_compute_opt; { @@ -2010,6 +2096,7 @@ void llama_context::opt_epoch_iter( } ggml_opt_prepare_alloc(opt_ctx, ctx_compute_opt, gf, res->get_tokens(), res->get_logits()); ggml_opt_alloc(opt_ctx, train); + res->set_inputs(&ubatch); { struct ggml_tensor * labels = ggml_opt_labels(opt_ctx); @@ -2027,10 +2114,10 @@ void llama_context::opt_epoch_iter( callback(train, opt_ctx, dataset, result, idata_in_loop + (pos_ctx + pos_batch)/n_ubatch + 1, ndata_in_loop, t_loop_start); } ggml_free(ctx_compute_opt); - } - } - kv_guard.commit(); + pos_batch += ubatch.n_tokens; + } while (kv_state->next()); + } } void llama_context::opt_epoch( @@ -2194,6 +2281,7 @@ llama_kv_cache * llama_get_kv_self(llama_context * ctx) { return ctx->get_kv_self(); } +// deprecated void llama_kv_self_update(llama_context * ctx) { ctx->kv_self_update(); } @@ -2448,6 +2536,7 @@ llama_pos llama_kv_self_seq_pos_max(llama_context * ctx, llama_seq_id seq_id) { return kv->seq_pos_max(seq_id); } +// deprecated void llama_kv_self_defrag(llama_context * ctx) { auto * kv = ctx->get_kv_self(); if (!kv) { @@ -2589,22 +2678,8 @@ int32_t llama_encode( int32_t llama_decode( llama_context * ctx, llama_batch batch) { - int ret = ctx->decode(batch); - - // defrag and try again - // TODO: distinguish return code when we are sure that even after defrag there is no space available - if (ret == 1) { - llama_kv_self_defrag(ctx); - ret = ctx->decode(batch); - - if (ret == 1) { - LLAMA_LOG_WARN("%s: failed to find KV cache slot for batch of size %d\n", __func__, batch.n_tokens); - - return ret; - } - } - - if (ret != 0) { + const int ret = ctx->decode(batch); + if (ret != 0 && ret != 1) { LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret); } diff --git a/src/llama-context.h b/src/llama-context.h index c0ceacb10c..3b880286bf 100644 --- a/src/llama-context.h +++ b/src/llama-context.h @@ -18,6 +18,9 @@ struct llama_kv_cache; class llama_io_read_i; class llama_io_write_i; +class llama_memory_i; +class llama_memory_state_i; + struct llama_context { // init scheduler and compute buffers, reserve worst-case graphs 
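// note: the reservation happens in the order pp -> tg -> pp (see the constructor body above): the first pp pass sizes the worst-case compute buffers, the tg pass measures the number of splits and nodes, and the final pp pass re-primes ggml-alloc so that no reallocation is needed during inference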
llama_context( @@ -47,7 +50,9 @@ struct llama_context { llama_kv_cache * get_kv_self(); const llama_kv_cache * get_kv_self() const; - void kv_self_update(); + // return true if the KV cache was updated + // TODO: remove + bool kv_self_update(); enum llama_pooling_type pooling_type() const; @@ -88,6 +93,16 @@ struct llama_context { int32_t il_start, int32_t il_end); + // process a single ubatch with a specific graph type + // if memory_state is provided, it will be applied first to the context's memory + // ret contains the status of the graph computation + // returns nullptr only if ret != GGML_STATUS_SUCCESS + llm_graph_result_ptr process_ubatch( + const llama_ubatch & ubatch, + llm_graph_type gtype, + llama_memory_state_i * mstate, + ggml_status & ret); + int encode(llama_batch & inp_batch); int decode(llama_batch & inp_batch); @@ -180,16 +195,18 @@ public: ggml_cgraph * graph_init(); // returns the result of ggml_backend_sched_graph_compute_async execution - ggml_status graph_compute( - ggml_cgraph * gf, - bool batched); + ggml_status graph_compute(ggml_cgraph * gf, bool batched); + + // reserve a graph with a dummy ubatch of the specified size + ggml_cgraph * graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_state_i * mstate); private: llm_graph_result_ptr graph_build( - ggml_context * ctx, - ggml_cgraph * gf, - const llama_ubatch & ubatch, - llm_graph_type gtype); + ggml_context * ctx, + ggml_cgraph * gf, + const llama_ubatch & ubatch, + llm_graph_type gtype, + const llama_memory_state_i * mstate); llm_graph_cb graph_get_cb() const; diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index cdd5887de9..b30f6fb4f4 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -83,7 +83,7 @@ void llm_graph_input_pos_bucket::set_input(const llama_ubatch * ubatch) { void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) { if (pos_bucket) { - kv_self->set_input_pos_bucket(pos_bucket, ubatch); + kv_state->set_input_pos_bucket(pos_bucket, ubatch); } } @@ -234,7 +234,7 @@ void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) { void llm_graph_input_s_copy::set_input(const llama_ubatch * ubatch) { GGML_UNUSED(ubatch); - const int64_t n_kv = kv_self->n; + const int64_t n_kv = kv_state->get_n_kv(); if (s_copy) { GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer)); @@ -242,7 +242,7 @@ void llm_graph_input_s_copy::set_input(const llama_ubatch * ubatch) { // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n for (uint32_t i = 0; i < n_kv; ++i) { - data[i] = kv_self->s_copy(i); + data[i] = kv_state->s_copy(i); } } } @@ -250,7 +250,7 @@ void llm_graph_input_s_copy::set_input(const llama_ubatch * ubatch) { void llm_graph_input_s_mask::set_input(const llama_ubatch * ubatch) { GGML_UNUSED(ubatch); - const int64_t n_kv = kv_self->n; + const int64_t n_kv = kv_state->get_n_kv(); if (s_mask) { GGML_ASSERT(ggml_backend_buffer_is_host(s_mask->buffer)); @@ -258,7 +258,7 @@ void llm_graph_input_s_mask::set_input(const llama_ubatch * ubatch) { // clear unused states for (int i = 0; i < n_kv; ++i) { - data[i] = kv_self->s_mask(i); + data[i] = kv_state->s_mask(i); } } } @@ -362,17 +362,17 @@ void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) { void llm_graph_input_attn_kv_unified::set_input(const llama_ubatch * ubatch) { if (self_kq_mask) { - kv_self->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); + kv_state->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn);
} } void llm_graph_input_attn_kv_unified_iswa::set_input(const llama_ubatch * ubatch) { if (self_kq_mask) { - kv_self->get_kv_base()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); + kv_state->get_base()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); } if (self_kq_mask_swa) { - kv_self->get_kv_swa()->set_input_kq_mask(self_kq_mask_swa, ubatch, cparams.causal_attn); + kv_state->get_swa()->set_input_kq_mask(self_kq_mask_swa, ubatch, cparams.causal_attn); } } @@ -448,14 +448,14 @@ llm_graph_context::llm_graph_context(const llm_graph_params & params) : backend_cpu (params.backend_cpu), cvec (params.cvec), loras (params.loras), - memory (params.memory), + mstate (params.mstate), cross (params.cross), cb_func (params.cb), res (std::make_unique<llm_graph_result>()) { } int64_t llm_graph_context::n_pos_per_embd() const { - return arch == LLM_ARCH_QWEN2VL ? 4 : 1; + return hparams.rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1; } void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const { @@ -954,11 +954,11 @@ ggml_tensor * llm_graph_context::build_inp_cls() const { } ggml_tensor * llm_graph_context::build_inp_s_copy() const { - const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory); + const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate); - auto inp = std::make_unique<llm_graph_input_s_copy>(kv_self); + auto inp = std::make_unique<llm_graph_input_s_copy>(kv_state); - const auto n_kv = kv_self->n; + const auto n_kv = kv_state->get_n_kv(); auto & cur = inp->s_copy; @@ -971,11 +971,11 @@ ggml_tensor * llm_graph_context::build_inp_s_mask() const { - const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory); + const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate); - auto inp = std::make_unique<llm_graph_input_s_mask>(kv_self); + auto inp = std::make_unique<llm_graph_input_s_mask>(kv_state); - const auto n_kv = kv_self->n; + const auto n_kv = kv_state->get_n_kv(); auto & cur = inp->s_mask; @@ -1025,11 +1025,11 @@ ggml_tensor * llm_graph_context::build_inp_pos_bucket_enc() const { } ggml_tensor * llm_graph_context::build_inp_pos_bucket_dec() const { - const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory); + const auto * kv_state = static_cast<const llama_kv_cache_unified_state *>(mstate); - auto inp = std::make_unique<llm_graph_input_pos_bucket_kv>(hparams, kv_self); + auto inp = std::make_unique<llm_graph_input_pos_bucket_kv>(hparams, kv_state); - const auto n_kv = kv_self->get_n(); + const auto n_kv = kv_state->get_n_kv(); auto & cur = inp->pos_bucket; @@ -1231,14 +1231,14 @@ ggml_tensor * llm_graph_context::build_attn( } llm_graph_input_attn_kv_unified * llm_graph_context::build_attn_inp_kv_unified() const { - const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory); + const auto * kv_state = static_cast<const llama_kv_cache_unified_state *>(mstate); - auto inp = std::make_unique<llm_graph_input_attn_kv_unified>(hparams, cparams, kv_self); + auto inp = std::make_unique<llm_graph_input_attn_kv_unified>(hparams, cparams, kv_state); { GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified_iswa for SWA"); - const auto n_kv = kv_self->get_n(); + const auto n_kv = kv_state->get_n_kv(); inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); //cb(inp->self_kq_mask, "KQ_mask", -1); @@ -1268,19 +1268,19 @@ ggml_tensor * llm_graph_context::build_attn( ggml_build_forward_expand(gf, k_cur); ggml_build_forward_expand(gf, v_cur); - const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory); + const auto * kv_state = static_cast<const llama_kv_cache_unified_state *>(mstate); // store to KV cache { - ggml_build_forward_expand(gf, kv_self->cpy_k(ctx0, k_cur, il)); - ggml_build_forward_expand(gf, kv_self->cpy_v(ctx0, v_cur, il)); + ggml_build_forward_expand(gf,
kv_state->cpy_k(ctx0, k_cur, il)); + ggml_build_forward_expand(gf, kv_state->cpy_v(ctx0, v_cur, il)); } const auto & kq_mask = inp->get_kq_mask(); ggml_tensor * q = q_cur; - ggml_tensor * k = kv_self->get_k(ctx0, il); - ggml_tensor * v = kv_self->get_v(ctx0, il); + ggml_tensor * k = kv_state->get_k(ctx0, il); + ggml_tensor * v = kv_state->get_v(ctx0, il); ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, kq_scale); cb(cur, "kqv_out", il); @@ -1301,12 +1301,12 @@ ggml_tensor * llm_graph_context::build_attn( } llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unified_iswa() const { - const llama_kv_cache_unified_iswa * kv_self = static_cast<const llama_kv_cache_unified_iswa *>(memory); + const auto * kv_state = static_cast<const llama_kv_cache_unified_iswa_state *>(mstate); - auto inp = std::make_unique<llm_graph_input_attn_kv_unified_iswa>(hparams, cparams, kv_self); + auto inp = std::make_unique<llm_graph_input_attn_kv_unified_iswa>(hparams, cparams, kv_state); { - const auto n_kv = kv_self->get_kv_base()->get_n(); + const auto n_kv = kv_state->get_base()->get_n_kv(); inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); //cb(inp->self_kq_mask, "KQ_mask", -1); @@ -1318,7 +1318,7 @@ llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unif { GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified for non-SWA"); - const auto n_kv = kv_self->get_kv_swa()->get_n(); + const auto n_kv = kv_state->get_swa()->get_n_kv(); inp->self_kq_mask_swa = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); //cb(inp->self_kq_mask_swa, "KQ_mask_swa", -1); @@ -1348,23 +1348,23 @@ ggml_tensor * llm_graph_context::build_attn( ggml_build_forward_expand(gf, k_cur); ggml_build_forward_expand(gf, v_cur); + const auto * kv_state_iswa = static_cast<const llama_kv_cache_unified_iswa_state *>(mstate); + const bool is_swa = hparams.is_swa(il); - const llama_kv_cache_unified_iswa * kv_self = static_cast<const llama_kv_cache_unified_iswa *>(memory); - - const auto * kv = is_swa ? kv_self->get_kv_swa() : kv_self->get_kv_base(); + const auto * kv_state = is_swa ? kv_state_iswa->get_swa() : kv_state_iswa->get_base(); // store to KV cache { - ggml_build_forward_expand(gf, kv->cpy_k(ctx0, k_cur, il)); - ggml_build_forward_expand(gf, kv->cpy_v(ctx0, v_cur, il)); + ggml_build_forward_expand(gf, kv_state->cpy_k(ctx0, k_cur, il)); + ggml_build_forward_expand(gf, kv_state->cpy_v(ctx0, v_cur, il)); } const auto & kq_mask = is_swa ?
inp->get_kq_mask_swa() : inp->get_kq_mask(); ggml_tensor * q = q_cur; - ggml_tensor * k = kv->get_k(ctx0, il); - ggml_tensor * v = kv->get_v(ctx0, il); + ggml_tensor * k = kv_state->get_k(ctx0, il); + ggml_tensor * v = kv_state->get_v(ctx0, il); ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, kq_scale); cb(cur, "kqv_out", il); @@ -1446,12 +1446,12 @@ ggml_tensor * llm_graph_context::build_copy_mask_state( ggml_tensor * state_mask, int32_t n_state, int32_t n_seqs) const { - const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory); + const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate); - const auto n_kv = kv_self->n; - const auto kv_head = kv_self->head; + const auto n_kv = kv_state->get_n_kv(); + const auto kv_head = kv_state->get_head(); - ggml_tensor * states = ggml_reshape_2d(ctx0, s, n_state, kv_self->size); + ggml_tensor * states = ggml_reshape_2d(ctx0, s, n_state, kv_state->get_size()); // copy states // NOTE: assuming the copy destinations are ALL contained between kv_head and kv_head + n_kv @@ -1478,13 +1478,13 @@ ggml_tensor * llm_graph_context::build_rwkv_token_shift_load( ggml_tensor * state_mask, const llama_ubatch & ubatch, int il) const { - const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory); + const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate); const auto token_shift_count = hparams.token_shift_count; const int64_t n_seqs = ubatch.n_seqs; - ggml_tensor * token_shift_all = kv_self->k_l[il]; + ggml_tensor * token_shift_all = kv_state->get_k_l(il); ggml_tensor * token_shift = build_copy_mask_state( gf, token_shift_all, state_copy, state_mask, @@ -1499,19 +1499,19 @@ ggml_tensor * llm_graph_context::build_rwkv_token_shift_store( ggml_tensor * token_shift, const llama_ubatch & ubatch, int il) const { - const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory); + const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate); const auto token_shift_count = hparams.token_shift_count; const auto n_embd = hparams.n_embd; const int64_t n_seqs = ubatch.n_seqs; - const auto kv_head = kv_self->head; + const auto kv_head = kv_state->get_head(); return ggml_cpy( ctx0, ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0), - ggml_view_1d(ctx0, kv_self->k_l[il], hparams.n_embd_k_s() * n_seqs, hparams.n_embd_k_s() * kv_head * ggml_element_size(kv_self->k_l[il])) + ggml_view_1d(ctx0, kv_state->get_k_l(il), hparams.n_embd_k_s()*n_seqs, hparams.n_embd_k_s()*kv_head*ggml_element_size(kv_state->get_k_l(il))) ); } @@ -1562,20 +1562,25 @@ void llm_graph_context::build_pooling( ggml_tensor * inp_cls = build_inp_cls(); inp = ggml_get_rows(ctx0, inp, inp_cls); - // classification head - // https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/roberta/modeling_roberta.py#L1566 - GGML_ASSERT(cls != nullptr); - GGML_ASSERT(cls_b != nullptr); + if (cls != nullptr && cls_b != nullptr) { + // classification head + // https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/roberta/modeling_roberta.py#L1566 + cur = ggml_add(ctx0, ggml_mul_mat(ctx0, cls, inp), cls_b); + cur = ggml_tanh(ctx0, cur); - cur = ggml_add (ctx0, ggml_mul_mat(ctx0, cls, inp), cls_b); - cur = ggml_tanh(ctx0, cur); - - // some models don't have `cls_out`, for example: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en - // https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/blob/cb5347e43979c3084a890e3f99491952603ae1b7/modeling_bert.py#L884-L896 - if (cls_out) { + // some models don't have
`cls_out`, for example: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en + // https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/blob/cb5347e43979c3084a890e3f99491952603ae1b7/modeling_bert.py#L884-L896 + if (cls_out) { + GGML_ASSERT(cls_out_b != nullptr); + cur = ggml_add(ctx0, ggml_mul_mat(ctx0, cls_out, cur), cls_out_b); + } + } else if (cls_out) { + // Single layer classification head (direct projection) + // https://github.com/huggingface/transformers/blob/f4fc42216cd56ab6b68270bf80d811614d8d59e4/src/transformers/models/bert/modeling_bert.py#L1476 GGML_ASSERT(cls_out_b != nullptr); - - cur = ggml_add (ctx0, ggml_mul_mat(ctx0, cls_out, cur), cls_out_b); + cur = ggml_add(ctx0, ggml_mul_mat(ctx0, cls_out, inp), cls_out_b); + } else { + GGML_ABORT("RANK pooling requires either cls+cls_b or cls_out+cls_out_b"); } } break; default: diff --git a/src/llama-graph.h b/src/llama-graph.h index 2b85bb25be..d1c5dd1bf0 100644 --- a/src/llama-graph.h +++ b/src/llama-graph.h @@ -17,10 +17,11 @@ struct ggml_tensor; struct llama_ubatch; struct llama_cparams; -class llama_memory_i; -class llama_kv_cache_unified; -class llama_kv_cache_unified_iswa; -class llama_kv_cache_recurrent; +class llama_memory_state_i; + +class llama_kv_cache_unified_state; +class llama_kv_cache_unified_iswa_state; +class llama_kv_cache_recurrent_state; // certain models (typically multi-modal) can produce different types of graphs enum llm_graph_type { @@ -133,7 +134,7 @@ class llm_graph_input_pos_bucket_kv : public llm_graph_input_i { public: llm_graph_input_pos_bucket_kv( const llama_hparams & hparams, - const llama_kv_cache_unified * kv_self) : hparams(hparams), kv_self(kv_self) {} + const llama_kv_cache_unified_state * kv_state) : hparams(hparams), kv_state(kv_state) {} virtual ~llm_graph_input_pos_bucket_kv() = default; void set_input(const llama_ubatch * ubatch) override; @@ -141,7 +142,7 @@ public: ggml_tensor * pos_bucket = nullptr; // I32 [n_kv, n_batch] const llama_hparams & hparams; - const llama_kv_cache_unified * kv_self; + const llama_kv_cache_unified_state * kv_state; }; class llm_graph_input_out_ids : public llm_graph_input_i { @@ -188,26 +189,26 @@ public: class llm_graph_input_s_copy : public llm_graph_input_i { public: - llm_graph_input_s_copy(const llama_kv_cache_recurrent * kv_self) : kv_self(kv_self) {} + llm_graph_input_s_copy(const llama_kv_cache_recurrent_state * kv_state) : kv_state(kv_state) {} virtual ~llm_graph_input_s_copy() = default; void set_input(const llama_ubatch * ubatch) override; ggml_tensor * s_copy; // I32 [kv_size] - const llama_kv_cache_recurrent * kv_self; + const llama_kv_cache_recurrent_state * kv_state; }; class llm_graph_input_s_mask : public llm_graph_input_i { public: - llm_graph_input_s_mask(const llama_kv_cache_recurrent * kv_self) : kv_self(kv_self) {} + llm_graph_input_s_mask(const llama_kv_cache_recurrent_state * kv_state) : kv_state(kv_state) {} virtual ~llm_graph_input_s_mask() = default; void set_input(const llama_ubatch * ubatch) override; ggml_tensor * s_mask; // F32 [1, n_kv] - const llama_kv_cache_recurrent * kv_self; + const llama_kv_cache_recurrent_state * kv_state; }; class llm_graph_input_cross_embd : public llm_graph_input_i { @@ -247,10 +248,10 @@ public: llm_graph_input_attn_kv_unified( const llama_hparams & hparams, const llama_cparams & cparams, - const llama_kv_cache_unified * kv_self) : + const llama_kv_cache_unified_state * kv_state) : hparams(hparams), cparams(cparams), - kv_self(kv_self) { + kv_state(kv_state) { } 
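// note: the input holds a non-owning pointer to the kv-cache state that the graph was built against; set_input() (llama-graph.cpp) reads the KQ mask for the current ubatch through this pointer, so the state must stay alive until the graph inputs have been set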
~llm_graph_input_attn_kv_unified() = default; @@ -264,7 +265,7 @@ public: const llama_hparams & hparams; const llama_cparams & cparams; - const llama_kv_cache_unified * kv_self; + const llama_kv_cache_unified_state * kv_state; }; class llm_graph_input_attn_kv_unified_iswa : public llm_graph_input_i { @@ -272,10 +273,10 @@ public: llm_graph_input_attn_kv_unified_iswa( const llama_hparams & hparams, const llama_cparams & cparams, - const llama_kv_cache_unified_iswa * kv_self) : + const llama_kv_cache_unified_iswa_state * kv_state) : hparams(hparams), cparams(cparams), - kv_self(kv_self) { + kv_state(kv_state) { } ~llm_graph_input_attn_kv_unified_iswa() = default; @@ -292,7 +293,7 @@ public: const llama_hparams & hparams; const llama_cparams & cparams; - const llama_kv_cache_unified_iswa * kv_self; + const llama_kv_cache_unified_iswa_state * kv_state; }; class llm_graph_input_attn_cross : public llm_graph_input_i { @@ -383,10 +384,10 @@ struct llm_graph_params { ggml_backend_sched_t sched; ggml_backend_t backend_cpu; - const llama_adapter_cvec * cvec; - const llama_adapter_loras * loras; - const llama_memory_i * memory; - const llama_cross * cross; + const llama_adapter_cvec * cvec; + const llama_adapter_loras * loras; + const llama_memory_state_i * mstate; + const llama_cross * cross; int32_t n_outputs; @@ -435,10 +436,10 @@ struct llm_graph_context { ggml_backend_t backend_cpu; // TODO: needed by build_attn_mha, figure out a way to remove? - const llama_adapter_cvec * cvec; - const llama_adapter_loras * loras; - const llama_memory_i * memory; - const llama_cross * cross; + const llama_adapter_cvec * cvec; + const llama_adapter_loras * loras; + const llama_memory_state_i * mstate; + const llama_cross * cross; const llm_graph_cb & cb_func; diff --git a/src/llama-hparams.h b/src/llama-hparams.h index 2d72eab180..b2bcb8b01a 100644 --- a/src/llama-hparams.h +++ b/src/llama-hparams.h @@ -131,6 +131,9 @@ struct llama_hparams { bool attn_soft_cap = false; bool use_kq_norm = true; + // for Classifiers + uint32_t n_cls_out = 1; + // llama4 uint32_t n_moe_layer_step = 0; uint32_t n_no_rope_layer_step = 4; diff --git a/src/llama-kv-cache.cpp b/src/llama-kv-cache.cpp index 4a42d6ecdc..447c09c969 100644 --- a/src/llama-kv-cache.cpp +++ b/src/llama-kv-cache.cpp @@ -17,11 +17,6 @@ // llama_kv_cache_unified // -uint32_t llama_kv_cache_unified::get_padding(const llama_cparams & cparams) { - // the FA kernels require padding to avoid extra runtime boundary checks - return cparams.flash_attn ? 
256u : 32u; -} - llama_kv_cache_unified::llama_kv_cache_unified( const llama_model & model, layer_filter_cb && filter, @@ -293,26 +288,81 @@ llama_pos llama_kv_cache_unified::seq_pos_max(llama_seq_id seq_id) const { return cells.seq_pos_max(seq_id); } -void llama_kv_cache_unified::restore() { - for (auto & state : recovery.states) { - cells.set(state.i, state.cells); +llama_memory_state_ptr llama_kv_cache_unified::init_batch( + const llama_batch & batch, + uint32_t n_ubatch, + bool embd_pooled, + bool logits_all) { + GGML_UNUSED(embd_pooled); + + auto sbatch = llama_sbatch(batch, hparams.n_embd, true, logits_all); + + std::vector<llama_ubatch> ubatches; + while (sbatch.n_tokens > 0) { + ubatches.push_back(sbatch.split_simple(n_ubatch)); } - recovery.clear(); + auto heads = prepare(ubatches); + if (heads.empty()) { + return std::make_unique<llama_kv_cache_unified_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE); + } + + return std::make_unique<llama_kv_cache_unified_state>(LLAMA_MEMORY_STATUS_SUCCESS, + this, std::move(sbatch), std::move(heads), std::move(ubatches)); } -void llama_kv_cache_unified::commit() { - if (recovery.states.empty()) { - LLAMA_LOG_WARN("%s: the recovery information upon a commit was empty - might indicate a bug (ref: %s)\n", - __func__, "https://github.com/ggml-org/llama.cpp/pull/13194"); - return; +llama_memory_state_ptr llama_kv_cache_unified::init_full() { + return std::make_unique<llama_kv_cache_unified_state>(LLAMA_MEMORY_STATUS_SUCCESS, this); +} + +std::vector<uint32_t> llama_kv_cache_unified::prepare(const std::vector<llama_ubatch> & ubatches) { + std::vector<uint32_t> res; + + struct state { + uint32_t head_old; // old position of the head, before placing the ubatch + uint32_t head_new; // new position of the head, after placing the ubatch + + llama_kv_cells_unified cells; // copy of the old cells, before placing the ubatch + }; + + // remember the old state of the cells so we can restore it in the end + std::vector<state> states; + + bool success = true; + + for (const auto & ubatch : ubatches) { + // only find a suitable slot for the ubatch.
don't modify the cells yet + const int32_t head_new = find_slot(ubatch); + if (head_new < 0) { + success = false; + break; + } + + // remember the position that we found + res.push_back(head_new); + + // store the old state of the cells in the recovery stack + states.push_back({head, (uint32_t) head_new, cells.cp(head_new, ubatch.n_tokens)}); + + // now emplace the ubatch + apply_ubatch(head_new, ubatch); } - recovery.clear(); + // iterate backwards and restore the cells to their original state + for (auto it = states.rbegin(); it != states.rend(); ++it) { + cells.set(it->head_new, it->cells); + head = it->head_old; + } + + if (!success) { + return {}; + } + + return res; } bool llama_kv_cache_unified::update(llama_context & lctx) { - bool need_reserve = false; + bool updated = false; auto * sched = lctx.get_sched(); @@ -330,14 +380,24 @@ bool llama_kv_cache_unified::update(llama_context & lctx) { auto * gf = lctx.graph_init(); auto res = build_graph_shift(lctx.get_cparams(), lctx.get_ctx_compute(), gf); + if (!res) { + LLAMA_LOG_ERROR("%s: failed to build graph for K-shift\n", __func__); + return updated; + } - ggml_backend_sched_alloc_graph(sched, gf); + if (!ggml_backend_sched_alloc_graph(sched, gf)) { + LLAMA_LOG_ERROR("%s: failed to allocate compute graph for K-shift\n", __func__); + return updated; + } res->set_inputs(nullptr); - lctx.graph_compute(gf, false); + if (lctx.graph_compute(gf, false) != GGML_STATUS_SUCCESS) { + LLAMA_LOG_ERROR("%s: failed to compute K-shift\n", __func__); + return updated; + } - need_reserve = true; + updated = true; } cells.reset_shift(); @@ -352,26 +412,38 @@ bool llama_kv_cache_unified::update(llama_context & lctx) { auto * gf = lctx.graph_init(); auto res = build_graph_defrag(lctx.get_cparams(), lctx.get_ctx_compute(), gf); + if (!res) { + LLAMA_LOG_ERROR("%s: failed to build graph for defrag\n", __func__); + return updated; + } - ggml_backend_sched_alloc_graph(sched, gf); + if (!ggml_backend_sched_alloc_graph(sched, gf)) { + LLAMA_LOG_ERROR("%s: failed to allocate compute graph for defrag\n", __func__); + return updated; + } res->set_inputs(nullptr); - lctx.graph_compute(gf, false); + if (lctx.graph_compute(gf, false) != GGML_STATUS_SUCCESS) { + LLAMA_LOG_ERROR("%s: failed to compute defrag\n", __func__); + return updated; + } - need_reserve = true; + updated = true; } do_defrag = false; } - return need_reserve; + return updated; } void llama_kv_cache_unified::defrag_sched(float thold) { + const auto n_kv = cells.used_max_p1(); + // - do not defrag small contexts (i.e. < 2048 tokens) // - count the padding towards the number of used tokens - const float fragmentation = n >= 2048 ? std::max(0.0f, 1.0f - (float(cells.get_used() + n_pad)/n)) : 0.0f; + const float fragmentation = n_kv >= 2048 ? std::max(0.0f, 1.0f - (float(cells.get_used() + n_pad)/n_kv)) : 0.0f; // queue defragmentation for next llama_kv_cache_update if (fragmentation > thold) { @@ -381,55 +453,37 @@ void llama_kv_cache_unified::defrag_sched(float thold) { } } -void llama_kv_cache_unified::set_full() { - n = cells.size(); - - // when simulating a full KV cache, the specific value of the "head" pointer is not important because it does not - // affect the shapes of the tensors in the compute graph - it only affects the offsets of the K/V views.
- // we should only guarantee that the head position won't cause out-of-bounds view of the K, V tensors, so - // setting it to 0 is the simplest way to achieve that - // ref: https://github.com/ggml-org/llama.cpp/issues/13359 - head = 0; -} - -llama_sbatch llama_kv_cache_unified::sbatch_init(const llama_batch & batch, bool logits_all) { - return llama_sbatch(batch, hparams.n_embd, true, logits_all); -} - -llama_ubatch llama_kv_cache_unified::ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const { - GGML_UNUSED(embd_pooled); - return sbatch.split_simple(n_ubatch); -} - -bool llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) { +int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const { const uint32_t n_tokens = ubatch.n_tokens; + uint32_t head_cur = this->head; + // if we have enough unused cells before the current head -> // better to start searching from the beginning of the cache, hoping to fill it - if (head > cells.get_used() + 2*ubatch.n_tokens) { - head = 0; + if (head_cur > cells.get_used() + 2*ubatch.n_tokens) { + head_cur = 0; } // otherwise, one cell per token. if (n_tokens > cells.size()) { LLAMA_LOG_ERROR("%s: n_tokens = %d > size = %u\n", __func__, n_tokens, cells.size()); - return false; + return -1; } //#define FIND_SLOT_DEBUG 1 #if FIND_SLOT_DEBUG - LLAMA_LOG_WARN("begin: n = %5d, used = %5d, head = %5d, n_swa = %5d\n", n, used, head, n_swa); + LLAMA_LOG_WARN("begin: n = %5d, used = %5d, head = %5d, n_swa = %5d\n", cells.used_max_p1(), cells.get_used(), head, n_swa); // for debugging { std::string ss; if (n_swa > 0) { - for (uint32_t i = 0; i < size; ++i) { + for (uint32_t i = 0; i < cells.size(); ++i) { if (cells.is_empty(i)) { ss += '.'; } else { - ss += 'x'; + ss += std::to_string(cells.seq_get(i)); } if (i%256 == 255) { ss += '\n'; @@ -438,23 +492,70 @@ bool llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) { } LLAMA_LOG_WARN("\n%s\n", ss.c_str()); } + + for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + if (cells.seq_pos_min(s) < 0) { + continue; + } + + LLAMA_LOG_WARN("kv_cells: n_swa = %4d, min[%d] = %5d, max[%d] = %5d\n", n_swa, s, cells.seq_pos_min(s), s, cells.seq_pos_max(s)); + } #endif uint32_t n_tested = 0; while (true) { - if (head + n_tokens > cells.size()) { - n_tested += cells.size() - head; - head = 0; + if (head_cur + n_tokens > cells.size()) { + n_tested += cells.size() - head_cur; + head_cur = 0; continue; } + // keep track of what the minimum sequence positions would be if we accept the ubatch + llama_seq_id seq_pos_min[LLAMA_MAX_PARALLEL_SEQUENCES]; + for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + seq_pos_min[s] = cells.seq_pos_min(s); + } + bool found = true; for (uint32_t i = 0; i < n_tokens; i++) { - // TODO: improve to accept cells that are masked by the SWA - if (!cells.is_empty(head + i)) { + const llama_pos pos = ubatch.pos[i]; + const llama_seq_id seq_id = ubatch.seq_id[i][0]; + + // can we use this cell? 
either: + // - the cell is empty + // - the cell is occupied only by one sequence: + // - mask causally, if the sequence is the same as the one we are inserting + // - mask SWA, using current max pos for that sequence in the cache + // always insert in the cell with minimum pos + bool can_use = cells.is_empty(head_cur + i); + + if (!can_use && cells.seq_count(head_cur + i) == 1) { + const llama_pos pos_cell = cells.pos_get(head_cur + i); + + // causal mask + if (cells.seq_has(head_cur + i, seq_id)) { + can_use = pos_cell >= pos; + } + + if (!can_use) { + const llama_seq_id seq_id_cell = cells.seq_get(head_cur + i); + + // SWA mask + // note: we insert only in the cell with minimum pos in order to preserve the invariant that + // all positions between [pos_min, pos_max] for each sequence will be present in the cache + // ref: https://github.com/ggml-org/llama.cpp/pull/13746#issuecomment-2916057092 + if (pos_cell == seq_pos_min[seq_id_cell] && + is_masked_swa(pos_cell, cells.seq_pos_max(seq_id_cell) + 1)) { + seq_pos_min[seq_id_cell]++; + can_use = true; + } + } + } + + if (!can_use) { found = false; - head += i + 1; + head_cur += i + 1; n_tested += i + 1; break; } @@ -466,58 +567,55 @@ bool llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) { if (n_tested >= cells.size()) { //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens); - return false; + return -1; } } - // store the old state of the cells in the recovery stack - recovery.states.push_back({head, cells.cp(head, n_tokens)}); + return head_cur; +} - for (uint32_t i = 0; i < n_tokens; ++i) { - cells.pos_set(head + i, ubatch.pos[i]); +void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch & ubatch) { + for (uint32_t i = 0; i < ubatch.n_tokens; ++i) { + if (!cells.is_empty(head_cur + i)) { + cells.rm(head_cur + i); + } + + cells.pos_set(head_cur + i, ubatch.pos[i]); for (int32_t j = 0; j < ubatch.n_seq_id[i]; j++) { - cells.seq_add(head + i, ubatch.seq_id[i][j]); + cells.seq_add(head_cur + i, ubatch.seq_id[i][j]); } } - // a heuristic, to avoid attending the full cache if it is not yet utilized - // after enough generations, the benefit from this heuristic disappears - // if we start defragmenting the cache, the benefit from this will be more important - n = std::min(cells.size(), std::max(n_pad, GGML_PAD(cells.used_max_p1(), n_pad))); - -#ifdef FIND_SLOT_DEBUG - LLAMA_LOG_WARN("end: n = %5d, used = %5d, head = %5d, n_swa = %5d\n", n, used, head, n_swa); -#endif - - return true; + // move the head at the end of the slot + head = head_cur + ubatch.n_tokens; } bool llama_kv_cache_unified::get_can_shift() const { return true; } -uint32_t llama_kv_cache_unified::get_n() const { - return n; -} - uint32_t llama_kv_cache_unified::get_size() const { return cells.size(); } -ggml_tensor * llama_kv_cache_unified::get_k(ggml_context * ctx, int32_t il) const { +uint32_t llama_kv_cache_unified::get_n_kv() const { + return std::min(cells.size(), std::max(n_pad, GGML_PAD(cells.used_max_p1(), n_pad))); +} + +ggml_tensor * llama_kv_cache_unified::get_k(ggml_context * ctx, int32_t il, uint32_t n_kv) const { const int32_t ikv = map_layer_ids.at(il); auto * k = layers[ikv].k; return ggml_view_3d(ctx, k, - hparams.n_embd_head_k, hparams.n_head_kv(il), n, + hparams.n_embd_head_k, hparams.n_head_kv(il), n_kv, ggml_row_size(k->type, hparams.n_embd_head_k), ggml_row_size(k->type, hparams.n_embd_k_gqa(il)), 0); } -ggml_tensor * llama_kv_cache_unified::get_v(ggml_context * ctx, int32_t il) const { 
+ggml_tensor * llama_kv_cache_unified::get_v(ggml_context * ctx, int32_t il, uint32_t n_kv) const { const int32_t ikv = map_layer_ids.at(il); auto * v = layers[ikv].v; @@ -525,7 +623,7 @@ ggml_tensor * llama_kv_cache_unified::get_v(ggml_context * ctx, int32_t il) cons if (!v_trans) { // note: v->nb[1] <= v->nb[2] return ggml_view_3d(ctx, v, - hparams.n_embd_head_v, hparams.n_head_kv(il), n, + hparams.n_embd_head_v, hparams.n_head_kv(il), n_kv, ggml_row_size(v->type, hparams.n_embd_head_v), // v->nb[1] ggml_row_size(v->type, hparams.n_embd_v_gqa(il)), // v->nb[2] 0); @@ -533,13 +631,13 @@ ggml_tensor * llama_kv_cache_unified::get_v(ggml_context * ctx, int32_t il) cons // note: v->nb[1] > v->nb[2] return ggml_view_3d(ctx, v, - n, hparams.n_head_kv(il), hparams.n_embd_head_v, + n_kv, hparams.n_head_kv(il), hparams.n_embd_head_v, ggml_row_size(v->type, v->ne[1]*hparams.n_embd_head_v), // v->nb[1] ggml_row_size(v->type, v->ne[1]), // v->nb[2] 0); } -ggml_tensor * llama_kv_cache_unified::cpy_k(ggml_context * ctx, ggml_tensor * k_cur, int32_t il) const { +ggml_tensor * llama_kv_cache_unified::cpy_k(ggml_context * ctx, ggml_tensor * k_cur, int32_t il, uint32_t head_cur) const { const int32_t ikv = map_layer_ids.at(il); auto * k = layers[ikv].k; @@ -548,12 +646,12 @@ ggml_tensor * llama_kv_cache_unified::cpy_k(ggml_context * ctx, ggml_tensor * k_ ggml_tensor * k_view = ggml_view_1d(ctx, k, n_tokens*hparams.n_embd_k_gqa(il), - ggml_row_size(k->type, hparams.n_embd_k_gqa(il))*head); + ggml_row_size(k->type, hparams.n_embd_k_gqa(il))*head_cur); return ggml_cpy(ctx, k_cur, k_view); } -ggml_tensor * llama_kv_cache_unified::cpy_v(ggml_context * ctx, ggml_tensor * v_cur, int32_t il) const { +ggml_tensor * llama_kv_cache_unified::cpy_v(ggml_context * ctx, ggml_tensor * v_cur, int32_t il, uint32_t head_cur) const { const int32_t ikv = map_layer_ids.at(il); auto * v = layers[ikv].v; @@ -567,12 +665,12 @@ ggml_tensor * llama_kv_cache_unified::cpy_v(ggml_context * ctx, ggml_tensor * v_ if (!v_trans) { v_view = ggml_view_1d(ctx, v, n_tokens*hparams.n_embd_v_gqa(il), - ggml_row_size(v->type, hparams.n_embd_v_gqa(il))*head); + ggml_row_size(v->type, hparams.n_embd_v_gqa(il))*head_cur); } else { // note: the V cache is transposed when not using flash attention v_view = ggml_view_2d(ctx, v, n_tokens, hparams.n_embd_v_gqa(il), (v->ne[1])*ggml_element_size(v), - ( head)*ggml_element_size(v)); + (head_cur)*ggml_element_size(v)); v_cur = ggml_transpose(ctx, v_cur); } @@ -580,33 +678,6 @@ ggml_tensor * llama_kv_cache_unified::cpy_v(ggml_context * ctx, ggml_tensor * v_ return ggml_cpy(ctx, v_cur, v_view); } -void llama_kv_cache_unified::prune_swa(llama_seq_id seq_id, llama_pos pmin, llama_pos pmax) { - // no pruning is needed when the cache does not use SWA - GGML_ASSERT(swa_type != LLAMA_SWA_TYPE_NONE && "do not prune non-SWA cache"); - - int n_attended = 0; - - for (uint32_t i = 0; i < cells.size(); ++i) { - if (!cells.seq_has(i, seq_id)) { - continue; - } - - const llama_pos p0 = cells.pos_get(i); - - if (p0 <= pmin && !is_masked_swa(p0, pmin)) { - n_attended++; - } - - if (is_masked_swa(p0, pmax)) { - cells.seq_rm(i, seq_id); - } - } - - if (n_attended < std::min(n_swa, pmin)) { - LLAMA_LOG_WARN("%s: partial SWA cache detected - possible loss of information, pmin = %d, n_attended = %d, n_swa = %d\n", __func__, pmin, n_attended, n_swa); - } -} - void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const { const int64_t n_tokens = ubatch->n_tokens; const 
int64_t n_seq_tokens = ubatch->n_seq_tokens; @@ -615,7 +686,7 @@ void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ub GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer)); float * data = (float *) dst->data; - const int64_t n_kv = n; + const auto n_kv = dst->ne[0]; // Use only the previous KV cells of the correct sequence for each token of the ubatch. // It's assumed that if a token in the batch has multiple sequences, they are equivalent. @@ -636,7 +707,7 @@ void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ub for (int j = 0; j < n_seq_tokens; ++j) { const llama_pos p1 = ubatch->pos[s*n_seq_tokens + j]; - for (int i = 0; i < n_kv; ++i) { + for (uint32_t i = 0; i < n_kv; ++i) { float f = 0.0f; bool masked = false; @@ -672,7 +743,7 @@ void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ub // mask padded tokens if (data) { for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { - for (int j = 0; j < n_kv; ++j) { + for (uint32_t j = 0; j < n_kv; ++j) { data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY; } } @@ -698,7 +769,7 @@ void llama_kv_cache_unified::set_input_pos_bucket(ggml_tensor * dst, const llama int32_t * data = (int32_t *) dst->data; - const int64_t n_kv = n; + const int32_t n_kv = dst->ne[0]; for (int h = 0; h < 1; ++h) { for (int j = 0; j < n_tokens; ++j) { @@ -757,11 +828,19 @@ ggml_tensor * llama_kv_cache_unified::build_rope_shift( const auto & yarn_beta_slow = cparams.yarn_beta_slow; const auto & n_rot = hparams.n_rot; - const auto & rope_type = hparams.rope_type; + const auto & rope_type = hparams.rope_type == LLAMA_ROPE_TYPE_MROPE + // @ngxson : this is a workaround + // for M-RoPE, we want to rotate the whole vector when doing KV shift + // a normal RoPE should work, we just need to use the correct ordering + // ref: https://github.com/ggml-org/llama.cpp/pull/13870 + ? LLAMA_ROPE_TYPE_NEOX + : hparams.rope_type; // See llm_build_deepseek2() for why attn_factor has to be scaled for YaRN RoPE to work correctly. // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation. - const float yarn_attn_factor = model.arch == LLM_ARCH_DEEPSEEK2 ? 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)) : cparams.yarn_attn_factor; + const float yarn_attn_factor = model.arch == LLM_ARCH_DEEPSEEK2 + ? 
1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)) + : cparams.yarn_attn_factor; ggml_tensor * tmp; @@ -1354,20 +1433,24 @@ bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell batch.seq_id[i] = &dest_seq_id; } - if (!find_slot(batch)) { + const auto head_cur = find_slot(batch); + if (head_cur < 0) { LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__); return false; } - commit(); + apply_ubatch(head_cur, batch); - // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values) + // keep the head at the old position because we will read the KV data into it in state_read_data() + head = head_cur; + + // DEBUG CHECK: head_cur should be our first cell, head_cur + cell_count - 1 should be our last cell (verify seq_id and pos values) // Assume that this is one contiguous block of cells - GGML_ASSERT(head + cell_count <= cells.size()); - GGML_ASSERT(cells.pos_get(head) == batch.pos[0]); - GGML_ASSERT(cells.pos_get(head + cell_count - 1) == batch.pos[cell_count - 1]); - GGML_ASSERT(cells.seq_has(head, dest_seq_id)); - GGML_ASSERT(cells.seq_has(head + cell_count - 1, dest_seq_id)); + GGML_ASSERT(head_cur + cell_count <= cells.size()); + GGML_ASSERT(cells.pos_get(head_cur) == batch.pos[0]); + GGML_ASSERT(cells.pos_get(head_cur + cell_count - 1) == batch.pos[cell_count - 1]); + GGML_ASSERT(cells.seq_has(head_cur, dest_seq_id)); + GGML_ASSERT(cells.seq_has(head_cur + cell_count - 1, dest_seq_id)); } else { // whole KV cache restore @@ -1417,10 +1500,12 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, (uint32_t) layers.size()); return false; } + if (cell_count > cells.size()) { LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, cells.size()); return false; } + if (this->v_trans != (bool) v_trans) { LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__); return false; @@ -1531,6 +1616,108 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell return true; } +// +// llama_kv_cache_unified_state +// + +llama_kv_cache_unified_state::llama_kv_cache_unified_state(llama_memory_status status) : status(status) {} + +llama_kv_cache_unified_state::llama_kv_cache_unified_state( + llama_memory_status status, + llama_kv_cache_unified * kv) : status(status), kv(kv) { + n_kv = kv->get_size(); + head = 0; + } + +llama_kv_cache_unified_state::llama_kv_cache_unified_state( + llama_memory_status status, + llama_kv_cache_unified * kv, + llama_sbatch sbatch, + std::vector<uint32_t> heads, + std::vector<llama_ubatch> ubatches) + : status(status), + kv(kv), + sbatch(std::move(sbatch)), + heads(std::move(heads)), + ubatches(std::move(ubatches)) { + } + +llama_kv_cache_unified_state::~llama_kv_cache_unified_state() = default; + +bool llama_kv_cache_unified_state::next() { + assert(status == LLAMA_MEMORY_STATUS_SUCCESS); + + if (++i_next >= ubatches.size()) { + return false; + } + + return true; +} + +bool llama_kv_cache_unified_state::apply() { + assert(status == LLAMA_MEMORY_STATUS_SUCCESS); + + kv->apply_ubatch(heads[i_next], ubatches[i_next]); + + n_kv = kv->get_n_kv(); + head = heads[i_next]; + + return true; +} + +std::vector<size_t> & llama_kv_cache_unified_state::out_ids() { + assert(status == LLAMA_MEMORY_STATUS_SUCCESS); + + return sbatch.out_ids; +} + +llama_memory_status llama_kv_cache_unified_state::get_status() const { +
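// status semantics, as consumed by llama_context::decode above: LLAMA_MEMORY_STATUS_SUCCESS - slots were prepared for all ubatches and can be applied; LLAMA_MEMORY_STATUS_FAILED_PREPARE - no slot layout was found and the caller may defrag and retry the batch once; LLAMA_MEMORY_STATUS_FAILED_COMPUTE - fatal for this batch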
return status; +} + +const llama_ubatch & llama_kv_cache_unified_state::get_ubatch() const { + assert(status == LLAMA_MEMORY_STATUS_SUCCESS); + + return ubatches[i_next]; +} + +uint32_t llama_kv_cache_unified_state::get_n_kv() const { + return n_kv; +} + +ggml_tensor * llama_kv_cache_unified_state::get_k(ggml_context * ctx, int32_t il) const { + return kv->get_k(ctx, il, n_kv); +} + +ggml_tensor * llama_kv_cache_unified_state::get_v(ggml_context * ctx, int32_t il) const { + return kv->get_v(ctx, il, n_kv); +} + +ggml_tensor * llama_kv_cache_unified_state::cpy_k(ggml_context * ctx, ggml_tensor * k_cur, int32_t il) const { + return kv->cpy_k(ctx, k_cur, il, head); +} + +ggml_tensor * llama_kv_cache_unified_state::cpy_v(ggml_context * ctx, ggml_tensor * v_cur, int32_t il) const { + return kv->cpy_v(ctx, v_cur, il, head); +} + +void llama_kv_cache_unified_state::set_input_k_shift(ggml_tensor * dst) const { + kv->set_input_k_shift(dst); +} + +void llama_kv_cache_unified_state::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const { + kv->set_input_kq_mask(dst, ubatch, causal_attn); +} + +void llama_kv_cache_unified_state::set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const { + kv->set_input_pos_bucket(dst, ubatch); +} + +uint32_t llama_kv_cache_unified::get_padding(const llama_cparams & cparams) { + // the FA kernels require padding to avoid extra runtime boundary checks + return cparams.flash_attn ? 256u : 32u; +} + // // llama_kv_cache_unified_iswa // @@ -1544,22 +1731,21 @@ llama_kv_cache_unified_iswa::llama_kv_cache_unified_iswa( bool swa_full, uint32_t kv_size, uint32_t n_seq_max, - uint32_t n_batch, + uint32_t n_ubatch, uint32_t n_pad) : hparams(model.hparams) { llama_kv_cache_unified::layer_filter_cb filter_base = [&](int32_t il) { return !model.hparams.is_swa(il); }; llama_kv_cache_unified::layer_filter_cb filter_swa = [&](int32_t il) { return model.hparams.is_swa(il); }; const uint32_t size_base = kv_size; - uint32_t size_swa = std::min(size_base, GGML_PAD(hparams.n_swa*n_seq_max + n_batch, n_pad)); + uint32_t size_swa = std::min(size_base, GGML_PAD(hparams.n_swa*n_seq_max + n_ubatch, n_pad)); - // when using full-size SWA cache, we set the SWA cache size to be equal to the base cache size and disable pruning + // when using full-size SWA cache, we set the SWA cache size to be equal to the base cache size if (swa_full) { LLAMA_LOG_WARN("%s: using full-size SWA cache (ref: %s)\n", __func__, "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055"); size_swa = size_base; - do_prune = false; } LLAMA_LOG_INFO("%s: creating non-SWA KV cache, size = %u cells\n", __func__, size_base); @@ -1620,31 +1806,47 @@ llama_pos llama_kv_cache_unified_iswa::seq_pos_max(llama_seq_id seq_id) const { return kv_swa->seq_pos_max(seq_id); } -void llama_kv_cache_unified_iswa::restore() { - kv_base->restore(); - kv_swa ->restore(); -} +llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled, bool logits_all) { + GGML_UNUSED(embd_pooled); -void llama_kv_cache_unified_iswa::commit() { - kv_base->commit(); - kv_swa ->commit(); + // TODO: if we fail with split_simple, we should attempt different splitting strategies + // but to do that properly, we first have to refactor the batches to be more flexible - // slide the attention window, forgetting/pruning old tokens that are outside the window - if (do_prune) { - for (const auto & [seq_id, entry] : pending.pos) { - 
kv_swa->prune_swa(seq_id, entry.pmin, entry.pmax); - } + auto sbatch = llama_sbatch(batch, hparams.n_embd, true, logits_all); + std::vector<llama_ubatch> ubatches; + + while (sbatch.n_tokens > 0) { + auto ubatch = sbatch.split_simple(n_ubatch); + + ubatches.push_back(ubatch); } - pending.clear(); + auto heads_base = kv_base->prepare(ubatches); + if (heads_base.empty()) { + return std::make_unique<llama_kv_cache_unified_iswa_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE); + } + + auto heads_swa = kv_swa->prepare(ubatches); + if (heads_swa.empty()) { + return std::make_unique<llama_kv_cache_unified_iswa_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE); + } + + assert(heads_base.size() == heads_swa.size()); + + return std::make_unique<llama_kv_cache_unified_iswa_state>(LLAMA_MEMORY_STATUS_SUCCESS, + this, std::move(sbatch), std::move(heads_base), std::move(heads_swa), std::move(ubatches)); +} + +llama_memory_state_ptr llama_kv_cache_unified_iswa::init_full() { + return std::make_unique<llama_kv_cache_unified_iswa_state>(LLAMA_MEMORY_STATUS_SUCCESS, this); } bool llama_kv_cache_unified_iswa::update(llama_context & lctx) { - bool res = true; + bool res = false; - res = res & kv_base->update(lctx); - res = res & kv_swa ->update(lctx); + res = res | kv_base->update(lctx); + res = res | kv_swa ->update(lctx); return res; } @@ -1654,48 +1856,6 @@ void llama_kv_cache_unified_iswa::defrag_sched(float thold) { kv_swa ->defrag_sched(thold); } -void llama_kv_cache_unified_iswa::set_full() { - kv_base->set_full(); - kv_swa ->set_full(); -} - -llama_sbatch llama_kv_cache_unified_iswa::sbatch_init(const llama_batch & batch, bool logits_all) { - pending.clear(); - - if (do_prune) { - for (int i = 0; i < batch.n_tokens; ++i) { - for (int s = 0; s < batch.n_seq_id[i]; ++s) { - const llama_seq_id seq_id = batch.seq_id[i][s]; - const llama_pos pos = batch.pos[i]; - - if (pending.pos.find(seq_id) == pending.pos.end()) { - pending.pos[seq_id].pmin = pos; - pending.pos[seq_id].pmax = pos; - } else { - pending.pos[seq_id].pmin = std::min(pending.pos[seq_id].pmin, pos); - pending.pos[seq_id].pmax = std::max(pending.pos[seq_id].pmax, pos); - } - } - } - } - - return llama_sbatch(batch, hparams.n_embd, true, logits_all); -} - -llama_ubatch llama_kv_cache_unified_iswa::ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const { - GGML_UNUSED(embd_pooled); - return sbatch.split_simple(n_ubatch); -} - -bool llama_kv_cache_unified_iswa::find_slot(const llama_ubatch & batch) { - bool res = true; - - res = res & kv_base->find_slot(batch); - res = res & kv_swa ->find_slot(batch); - - return res; -} - bool llama_kv_cache_unified_iswa::get_can_shift() const { return kv_base->get_size() == kv_swa->get_size(); } @@ -1710,14 +1870,95 @@ void llama_kv_cache_unified_iswa::state_read(llama_io_read_i & io, llama_seq_id kv_swa ->state_read(io, seq_id); } -llama_kv_cache_unified * llama_kv_cache_unified_iswa::get_kv_base() const { +llama_kv_cache_unified * llama_kv_cache_unified_iswa::get_base() const { return kv_base.get(); } -llama_kv_cache_unified * llama_kv_cache_unified_iswa::get_kv_swa() const { +llama_kv_cache_unified * llama_kv_cache_unified_iswa::get_swa() const { return kv_swa.get(); } +// +// llama_kv_cache_unified_iswa_state +// + +llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(llama_memory_status status) : status(status) {} + +llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state( + llama_memory_status status, + llama_kv_cache_unified_iswa * kv) : status(status) { + state_base.reset(new llama_kv_cache_unified_state(status, kv->get_base())); + state_swa .reset(new llama_kv_cache_unified_state(status, kv->get_swa ())); +} +
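All of these state classes are driven by the same two-phase loop on the caller side. The following is a minimal sketch, assuming only the interface visible in this patch (init_batch(), get_status(), get_ubatch(), apply(), next()); the helper name process_batch_with_state is hypothetical and the graph computation is elided:

```cpp
// hypothetical helper (name and signature are illustrative, not part of the patch):
// drives a prepared memory state the way llama_context::decode and opt_epoch_iter do
static int process_batch_with_state(llama_kv_cache * kv_self,
                                    const llama_batch & batch,
                                    const llama_cparams & cparams,
                                    bool embd_pooled,
                                    bool logits_all) {
    // phase 1: prepare - find slots for every ubatch without touching the cells
    llama_memory_state_ptr state = kv_self->init_batch(batch, cparams.n_ubatch, embd_pooled, logits_all);
    if (!state || state->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
        return -2; // no slot layout found for the whole batch
    }

    // phase 2: apply + compute, one prepared ubatch at a time
    do {
        const llama_ubatch & ubatch = state->get_ubatch();

        if (!state->apply()) { // commit the prepared head/cells to the cache
            return -2;
        }

        // ... build, allocate and compute the graph for this ubatch ...
        (void) ubatch;
    } while (state->next()); // advance to the next prepared ubatch

    return 0;
}
```

This prepare/apply split is what replaces the old llama_kv_cache_guard: a batch that cannot be placed is rejected before any cell is modified, so the success path has nothing to roll back.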
+llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(
+        llama_memory_status status,
+        llama_kv_cache_unified_iswa * kv,
+        llama_sbatch sbatch,
+        std::vector<uint32_t> heads_base,
+        std::vector<uint32_t> heads_swa,
+        std::vector<llama_ubatch> ubatches)
+    : status(status),
+    sbatch(std::move(sbatch)),
+    ubatches(std::move(ubatches)) {
+    // note: here we copy the ubatches. not sure if this is ideal
+    state_base.reset(new llama_kv_cache_unified_state(status, kv->get_base(), {}, std::move(heads_base), this->ubatches));
+    state_swa .reset(new llama_kv_cache_unified_state(status, kv->get_swa (), {}, std::move(heads_swa), this->ubatches));
+}
+
+llama_kv_cache_unified_iswa_state::~llama_kv_cache_unified_iswa_state() = default;
+
+bool llama_kv_cache_unified_iswa_state::next() {
+    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+    state_base->next();
+    state_swa ->next();
+
+    if (++i_next >= ubatches.size()) {
+        return false;
+    }
+
+    return true;
+}
+
+bool llama_kv_cache_unified_iswa_state::apply() {
+    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+    bool res = true;
+
+    res = res & state_base->apply();
+    res = res & state_swa ->apply();
+
+    return res;
+}
+
+std::vector<int64_t> & llama_kv_cache_unified_iswa_state::out_ids() {
+    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+    return sbatch.out_ids;
+}
+
+llama_memory_status llama_kv_cache_unified_iswa_state::get_status() const {
+    return status;
+}
+
+const llama_ubatch & llama_kv_cache_unified_iswa_state::get_ubatch() const {
+    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+    return ubatches[i_next];
+}
+
+const llama_kv_cache_unified_state * llama_kv_cache_unified_iswa_state::get_base() const {
+    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+    return state_base.get();
+}
+
+const llama_kv_cache_unified_state * llama_kv_cache_unified_iswa_state::get_swa() const {
+    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+    return state_swa.get();
+}
+
 //
 // llama_kv_cache_recurrent
 //
@@ -2063,20 +2304,73 @@ llama_pos llama_kv_cache_recurrent::seq_pos_max(llama_seq_id seq_id) const {
     return result;
 }
 
-void llama_kv_cache_recurrent::restore() {
-    if (pending.ranges.empty()) {
-        return;
+llama_memory_state_ptr llama_kv_cache_recurrent::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled, bool logits_all) {
+    GGML_UNUSED(embd_pooled);
+
+    auto sbatch = llama_sbatch(batch, hparams.n_embd, false, logits_all);
+
+    std::vector<llama_ubatch> ubatches;
+
+    while (sbatch.n_tokens > 0) {
+        llama_ubatch ubatch;
+
+        if (embd_pooled) {
+            // Pooled embeddings cannot be split across ubatches (yet)
+            ubatch = sbatch.split_seq(n_ubatch);
+        } else {
+            ubatch = sbatch.split_equal(n_ubatch);
+        }
+
+        ubatches.push_back(ubatch);
     }
 
-    seq_rm(-1, -1, -1);
+    if (!prepare(ubatches)) {
+        return std::make_unique<llama_kv_cache_recurrent_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
+    }
+
+    return std::make_unique<llama_kv_cache_recurrent_state>(LLAMA_MEMORY_STATUS_SUCCESS, this, std::move(sbatch), std::move(ubatches));
 }
 
-void llama_kv_cache_recurrent::commit() {
-    pending.ranges.clear();
+llama_memory_state_ptr llama_kv_cache_recurrent::init_full() {
+    return std::make_unique<llama_kv_cache_recurrent_state>(LLAMA_MEMORY_STATUS_SUCCESS, this);
 }
 
-bool llama_kv_cache_recurrent::update(llama_context & ctx) {
-    GGML_UNUSED(ctx);
+bool llama_kv_cache_recurrent::prepare(const std::vector<llama_ubatch> & ubatches) {
+    // simply remember the full state because it is very small for this type of cache
+    // TODO: optimize
+    auto org_cells = cells;
+    auto org_used  = used;
+    auto org_head  = head;
+
+    bool success = true;
+
+    // TODO: here we have to verify that all ubatches can fit in the cells
+    //       however, the
current implementation is broken because it relies on s_copy() and s_mask() to update the cells + // during the compute of each ubatch. to reproduce, uncomment the following loop and run: + // + // $ llama-parallel -m ./mamba-130m/ggml-model-f16.gguf -np 5 -ns 8 + // + // recovery from failures when the batch does not fit in the KV cache will not work correctly until this is fixed + // + GGML_UNUSED(ubatches); + //for (const auto & ubatch : ubatches) { + // if (!find_slot(ubatch)) { + // success = false; + // break; + // } + //} + + // restore the original state + cells = std::move(org_cells); + used = org_used; + head = org_head; + + return success; +} + +bool llama_kv_cache_recurrent::update(llama_context & lctx) { + GGML_UNUSED(lctx); + // noop return false; } @@ -2085,28 +2379,7 @@ void llama_kv_cache_recurrent::defrag_sched(float thold) { // noop } -void llama_kv_cache_recurrent::set_full() { - n = size; - head = 0; -} - -llama_sbatch llama_kv_cache_recurrent::sbatch_init( - const llama_batch & batch, - bool logits_all) { - return llama_sbatch(batch, hparams.n_embd, false, logits_all); -} - -llama_ubatch llama_kv_cache_recurrent::ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const { - if (embd_pooled) { - // Pooled embeddings cannot be split across ubatches (yet) - return sbatch.split_seq(n_ubatch); - } - - return sbatch.split_equal(n_ubatch); -} - -bool llama_kv_cache_recurrent::find_slot( - const llama_ubatch & ubatch) { +bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { const uint32_t n_tokens = ubatch.n_tokens; const uint32_t n_seqs = ubatch.n_seqs; @@ -2324,18 +2597,6 @@ float llama_kv_cache_recurrent::s_mask(int i) const { return res; } -uint32_t llama_kv_cache_recurrent::cell_max() const { - for (uint32_t i = size; i > 0; --i) { - const kv_cell & cell = cells[i - 1]; - - if (cell.pos >= 0 && !cell.is_empty()) { - return i; - } - } - - return 0; -} - size_t llama_kv_cache_recurrent::total_size() const { size_t size = 0; for (const auto & buf : bufs) { @@ -2550,11 +2811,11 @@ bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t ce } batch.n_seq_id[0] = 1; batch.seq_id[0] = &dest_seq_id; + if (!find_slot(batch)) { LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__); return false; } - commit(); // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values) // Assume that this is one contiguous block of cells @@ -2737,3 +2998,84 @@ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t ce return true; } + +// +// llama_kv_cache_recurrent_state +// + +llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state(llama_memory_status status) : status(status) {} + +llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state( + llama_memory_status status, + llama_kv_cache_recurrent * kv) : status(status), kv(kv), is_full(true) { +} + +llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state( + llama_memory_status status, + llama_kv_cache_recurrent * kv, + llama_sbatch sbatch, + std::vector ubatches) : status(status), kv(kv), sbatch(std::move(sbatch)), ubatches(std::move(ubatches)) {} + +llama_kv_cache_recurrent_state::~llama_kv_cache_recurrent_state() = default; + +bool llama_kv_cache_recurrent_state::next() { + assert(status == LLAMA_MEMORY_STATUS_SUCCESS); + + if (++i_next >= ubatches.size()) { + return false; + } + + return true; +} + +bool llama_kv_cache_recurrent_state::apply() { + 
assert(status == LLAMA_MEMORY_STATUS_SUCCESS); + + kv->find_slot(ubatches[i_next]); + + return true; +} + +std::vector & llama_kv_cache_recurrent_state::out_ids() { + assert(status == LLAMA_MEMORY_STATUS_SUCCESS); + + return sbatch.out_ids; +} + +llama_memory_status llama_kv_cache_recurrent_state::get_status() const { + return status; +} + +const llama_ubatch & llama_kv_cache_recurrent_state::get_ubatch() const { + assert(status == LLAMA_MEMORY_STATUS_SUCCESS); + + return ubatches[i_next]; +} + +uint32_t llama_kv_cache_recurrent_state::get_n_kv() const { + return is_full ? kv->size : kv->n; +} + +uint32_t llama_kv_cache_recurrent_state::get_head() const { + return is_full ? 0 : kv->head; +} + +uint32_t llama_kv_cache_recurrent_state::get_size() const { + return kv->size; +} + +ggml_tensor * llama_kv_cache_recurrent_state::get_k_l(int32_t il) const { + return kv->k_l[il]; +} + +ggml_tensor * llama_kv_cache_recurrent_state::get_v_l(int32_t il) const { + return kv->v_l[il]; +} + +int32_t llama_kv_cache_recurrent_state::s_copy(int i) const { + return kv->s_copy(i); +} + +float llama_kv_cache_recurrent_state::s_mask(int i) const { + return kv->s_mask(i); +} diff --git a/src/llama-kv-cache.h b/src/llama-kv-cache.h index ce6261e45a..75969878e3 100644 --- a/src/llama-kv-cache.h +++ b/src/llama-kv-cache.h @@ -2,6 +2,7 @@ #include "llama.h" #include "llama-io.h" +#include "llama-batch.h" #include "llama-graph.h" #include "llama-memory.h" #include "llama-kv-cells.h" @@ -14,48 +15,35 @@ struct llama_cparams; struct llama_hparams; -struct llama_ubatch; -struct llama_sbatch; struct llama_model; struct llama_context; struct llama_kv_cache : public llama_memory_i { virtual ~llama_kv_cache() = default; - // call if batch processing fails - restores the cache state - virtual void restore() = 0; + // split the input batch into a set of ubatches and verify that they can fit into the cache + // return a state object containing the ubatches and KV cache state required to process them + // check the llama_memory_state_i::get_status() for the result + virtual llama_memory_state_ptr init_batch( + const llama_batch & batch, + uint32_t n_ubatch, + bool embd_pooled, + bool logits_all) = 0; - // call after successful batch processing - clears any pending state - virtual void commit() = 0; + // simulate full cache, used for allocating worst-case compute buffers + virtual llama_memory_state_ptr init_full() = 0; // process any pending defrag/shift/etc. operations // optionally call once before processing a new batch + // return true if any operations were performed virtual bool update(llama_context & lctx) = 0; // schedule a defrag if the fragmentation threshold is exceeded. 
otherwise, do nothing + // TODO: change to + // llama_memory_state_ptr init_defrag(float thold) = 0; + // virtual void defrag_sched(float thold) = 0; - // simulate full cache, used for allocating worst-case compute buffers - // TODO: remove - virtual void set_full() = 0; - - // - // batch processing - // - - // ============================================================================================================= - // TODO: refactor and simplify this [TAG: KV_API] - - virtual llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) = 0; - - // different KV caches require different batch splitting strategies - virtual llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const = 0; - - // find an empty slot of size "n_tokens" in the cache - virtual bool find_slot(const llama_ubatch & batch) = 0; - - // ============================================================================================================= - // getters virtual bool get_can_shift() const = 0; @@ -69,25 +57,6 @@ struct llama_kv_cache : public llama_memory_i { virtual void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) = 0; }; -// -// llama_kv_cache_guard -// - -struct llama_kv_cache_guard { - llama_kv_cache_guard(llama_kv_cache * kv) : kv(kv) {} - - ~llama_kv_cache_guard() { - kv->restore(); - } - - void commit() { - kv->commit(); - } - -private: - llama_kv_cache * kv; -}; - // // llama_kv_cache_unified // @@ -133,23 +102,18 @@ public: // llama_kv_cache // - void restore() override; - void commit() override; + llama_memory_state_ptr init_batch( + const llama_batch & batch, + uint32_t n_ubatch, + bool embd_pooled, + bool logits_all) override; - bool update(llama_context & ctx) override; + llama_memory_state_ptr init_full() override; + + bool update(llama_context & lctx) override; void defrag_sched(float thold) override; - void set_full() override; - - llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override; - llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override; - - // updates the cache head - // Note: On success, it's important that cache.head points - // to the first cell of the slot. 
- bool find_slot(const llama_ubatch & batch) override; - bool get_can_shift() const override; // state write/load @@ -161,18 +125,40 @@ public: // llama_kv_cache_unified specific API // - uint32_t get_n() const; uint32_t get_size() const; + // + // graph_build API + // + + uint32_t get_n_kv() const; + // get views of the current state of the cache - ggml_tensor * get_k(ggml_context * ctx, int32_t il) const; - ggml_tensor * get_v(ggml_context * ctx, int32_t il) const; + ggml_tensor * get_k(ggml_context * ctx, int32_t il, uint32_t n_kv) const; + ggml_tensor * get_v(ggml_context * ctx, int32_t il, uint32_t n_kv) const; - // store k_cur and v_cur in the cache based on the current head location - ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, int32_t il) const; - ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, int32_t il) const; + // store k_cur and v_cur in the cache based on the provided head location + ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, int32_t il, uint32_t head_cur) const; + ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, int32_t il, uint32_t head_cur) const; - void prune_swa(llama_seq_id seq_id, llama_pos pmin, llama_pos pmax); + // + // preparation API + // + + // find places for the provided ubatches in the cache, returns the head locations + // return empty vector on failure + std::vector prepare(const std::vector & ubatches); + + // return the cell position where we can insert the ubatch + // return -1 on failure to find a contiguous slot of kv cells + int32_t find_slot(const llama_ubatch & ubatch) const; + + // emplace the ubatch context into slot: [head_cur, head_cur + ubatch.n_tokens) + void apply_ubatch(uint32_t head_cur, const llama_ubatch & ubatch); + + // + // set_input API + // void set_input_kq_mask (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const; void set_input_k_shift (ggml_tensor * dst) const; @@ -194,11 +180,9 @@ private: bool do_defrag = false; bool v_trans = true; // the value tensor is transposed - uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot()) - - // computed before each graph build - // TODO: cells should start to maintain this value dynamically based on the edits - uint32_t n = 0; + // the current index from where we start searching for a free slot in the ring buffer of KV cells (see find_slot()) + // note: this is not part of the KV state and it's only used to speed-up the find_slot() method + uint32_t head = 0; const uint32_t n_seq_max = 1; @@ -220,24 +204,6 @@ private: // model layer id -> KV cache layer id std::unordered_map map_layer_ids; - // recovery information used to restore the KV cells to their original state in case of a failure - // TODO: do not store as a state in the llama_kv_cache object, instead return upon batch preparation - // to achieve that, first need to refactor the llama_kv_cache interface [TAG: KV_API] - struct { - void clear() { - states.clear(); - } - - struct state { - uint32_t i; - - llama_kv_cells_unified cells; - }; - - // stack with the partial states before each ubatch - std::vector states; - } recovery; - // defrag struct { std::vector ids; @@ -279,13 +245,88 @@ private: bool state_read_data(llama_io_read_i & io, uint32_t cell_count); }; +class llama_kv_cache_unified_state : public llama_memory_state_i { +public: + // used for errors + llama_kv_cache_unified_state(llama_memory_status status); + + // used to create a full-cache state + llama_kv_cache_unified_state( + llama_memory_status status, 
+            llama_kv_cache_unified * kv);
+
+    // used to create a state from a batch
+    llama_kv_cache_unified_state(
+            llama_memory_status status,
+            llama_kv_cache_unified * kv,
+            llama_sbatch sbatch,
+            std::vector<uint32_t> heads,
+            std::vector<llama_ubatch> ubatches);
+
+    virtual ~llama_kv_cache_unified_state();
+
+    //
+    // llama_memory_state_i
+    //
+
+    bool next()  override;
+    bool apply() override;
+
+    std::vector<int64_t> & out_ids() override;
+
+    llama_memory_status get_status() const override;
+    const llama_ubatch & get_ubatch() const override;
+
+    //
+    // llama_kv_cache_unified_state specific API
+    //
+
+    uint32_t get_n_kv() const;
+
+    // get views of the current state of the cache
+    ggml_tensor * get_k(ggml_context * ctx, int32_t il) const;
+    ggml_tensor * get_v(ggml_context * ctx, int32_t il) const;
+
+    // store k_cur and v_cur in the cache based on the provided head location
+    ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, int32_t il) const;
+    ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, int32_t il) const;
+
+    void set_input_k_shift(ggml_tensor * dst) const;
+
+    void set_input_kq_mask   (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
+    void set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const;
+
+private:
+    const llama_memory_status status;
+
+    llama_kv_cache_unified * kv;
+
+    llama_sbatch sbatch;
+
+    // the index of the next ubatch to process
+    size_t i_next = 0;
+
+    std::vector<uint32_t>     heads;
+    std::vector<llama_ubatch> ubatches;
+
+    //
+    // data needed for building the compute graph for the current ubatch:
+    //
+
+    // a heuristic, to avoid attending the full cache if it is not yet utilized
+    // as the cache gets filled, the benefit from this heuristic disappears
+    int32_t n_kv;
+
+    // the beginning of the current slot in which the ubatch will be inserted
+    int32_t head;
+};
+
 //
 // llama_kv_cache_unified_iswa
 //
 
 // utilizes two instances of llama_kv_cache_unified
 //   the first instance is for the non-SWA layers of the model and the second instance is for the SWA layers
-//   upon successful commit, the SWA cache removes old tokens outside the n_swa window
 
 class llama_kv_cache_unified_iswa : public llama_kv_cache {
 public:
@@ -298,7 +339,7 @@ public:
             bool   swa_full,
         uint32_t   kv_size,
         uint32_t   n_seq_max,
-        uint32_t   n_batch,
+        uint32_t   n_ubatch,
        uint32_t   n_pad);
 
     ~llama_kv_cache_unified_iswa() = default;
 
@@ -322,20 +363,18 @@ public:
     // llama_kv_cache
     //
 
-    void restore() override;
-    void commit()  override;
+    llama_memory_state_ptr init_batch(
+            const llama_batch & batch,
+            uint32_t n_ubatch,
+            bool embd_pooled,
+            bool logits_all) override;
 
-    bool update(llama_context & ctx) override;
+    llama_memory_state_ptr init_full() override;
+
+    bool update(llama_context & lctx) override;
 
     void defrag_sched(float thold) override;
 
-    void set_full() override;
-
-    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
-    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
-
-    bool find_slot(const llama_ubatch & batch) override;
-
     bool get_can_shift() const override;
 
     // state write/load
@@ -347,58 +386,80 @@ public:
     // llama_kv_cache_unified_iswa specific API
     //
 
-    llama_kv_cache_unified * get_kv_base() const;
-    llama_kv_cache_unified * get_kv_swa () const;
+    llama_kv_cache_unified * get_base() const;
+    llama_kv_cache_unified * get_swa () const;
 
 private:
     const llama_hparams & hparams;
 
-    bool do_prune = true;
-
-    struct {
-        struct entry {
-            llama_pos pmin;
-            llama_pos pmax;
-        };
-
-        void clear() {
-            pos.clear();
-        }
-
-        // used to perform SWA pruning of old tokens
-        std::unordered_map<llama_seq_id, entry> pos;
-    } pending;
-
     std::unique_ptr<llama_kv_cache_unified> kv_base;
     std::unique_ptr<llama_kv_cache_unified> kv_swa;
 };
 
+class llama_kv_cache_unified_iswa_state : public llama_memory_state_i {
+public:
+    // used for errors
+    llama_kv_cache_unified_iswa_state(llama_memory_status status);
+
+    // used to create a full-cache state
+    llama_kv_cache_unified_iswa_state(
+            llama_memory_status status,
+            llama_kv_cache_unified_iswa * kv);
+
+    // used to create a state from a batch
+    llama_kv_cache_unified_iswa_state(
+            llama_memory_status status,
+            llama_kv_cache_unified_iswa * kv,
+            llama_sbatch sbatch,
+            std::vector<uint32_t> heads_base,
+            std::vector<uint32_t> heads_swa,
+            std::vector<llama_ubatch> ubatches);
+
+    virtual ~llama_kv_cache_unified_iswa_state();
+
+    //
+    // llama_memory_state_i
+    //
+
+    bool next()  override;
+    bool apply() override;
+
+    std::vector<int64_t> & out_ids() override;
+
+    llama_memory_status get_status() const override;
+    const llama_ubatch & get_ubatch() const override;
+
+    //
+    // llama_kv_cache_unified_iswa_state specific API
+    //
+
+    const llama_kv_cache_unified_state * get_base() const;
+    const llama_kv_cache_unified_state * get_swa()  const;
+
+private:
+    const llama_memory_status status;
+
+    //llama_kv_cache_unified_iswa * kv;
+
+    llama_sbatch sbatch;
+
+    // the index of the next ubatch to process
+    size_t i_next = 0;
+
+    std::vector<llama_ubatch> ubatches;
+
+    std::unique_ptr<llama_kv_cache_unified_state> state_base;
+    std::unique_ptr<llama_kv_cache_unified_state> state_swa;
+};
+
 //
 // llama_kv_cache_recurrent
 //
 
+// TODO: extract the KV cache state used for graph computation into llama_kv_cache_recurrent_state_i
+//       see the implementation of llama_kv_cache_unified_state_i for an example how to do it
 class llama_kv_cache_recurrent : public llama_kv_cache {
 public:
-    struct kv_cell {
-        llama_pos pos  = -1;
-        int32_t   src  = -1; // used to copy states
-        int32_t   tail = -1;
-
-        std::set<llama_seq_id> seq_id;
-
-        bool has_seq_id(const llama_seq_id & id) const {
-            return seq_id.find(id) != seq_id.end();
-        }
-
-        bool is_empty() const {
-            return seq_id.empty();
-        }
-
-        bool is_same_seq(const kv_cell & other) const {
-            return seq_id == other.seq_id;
-        }
-    };
-
     llama_kv_cache_recurrent(
             const llama_model & model,
                     ggml_type   type_k,
@@ -428,19 +489,22 @@ public:
     // llama_kv_cache
     //
 
-    void restore() override;
-    void commit()  override;
+    llama_memory_state_ptr init_batch(
+            const llama_batch & batch,
+            uint32_t n_ubatch,
+            bool embd_pooled,
+            bool logits_all) override;
 
-    bool update(llama_context & ctx) override;
+    llama_memory_state_ptr init_full() override;
+
+    bool update(llama_context & lctx) override;
 
     void defrag_sched(float thold) override;
 
-    void set_full() override;
+    bool prepare(const std::vector<llama_ubatch> & ubatches);
 
-    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
-    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
-
-    bool find_slot(const llama_ubatch & batch) override;
+    // find a contiguous slot of kv cells and emplace the ubatch there
+    bool find_slot(const llama_ubatch & ubatch);
 
     bool get_can_shift() const override;
 
@@ -460,6 +524,27 @@ public:
     // computed before each graph build
     uint32_t n = 0;
 
+    // TODO: optimize for recurrent state needs
+    struct kv_cell {
+        llama_pos pos  = -1;
+        int32_t   src  = -1; // used to copy states
+        int32_t   tail = -1;
+
+        std::set<llama_seq_id> seq_id;
+
+        bool has_seq_id(const llama_seq_id & id) const {
+            return seq_id.find(id) != seq_id.end();
+        }
+
+        bool is_empty() const {
+            return seq_id.empty();
+        }
+
+        bool is_same_seq(const kv_cell & other) const {
+            return seq_id == other.seq_id;
} + }; + std::vector cells; std::vector k_l; // per layer @@ -469,26 +554,11 @@ private: //const llama_model & model; const llama_hparams & hparams; - // commit/restore cache - // TODO: rework for recurrent cache - struct slot_range { - uint32_t c0 = 0; // note: these are cell indices, not sequence positions - uint32_t c1 = 0; - }; - - // pending cell updates that are not yet committed - struct { - std::vector ranges; - } pending; - const uint32_t n_seq_max = 1; std::vector ctxs; std::vector bufs; - // find how many cells are currently in use - uint32_t cell_max() const; - size_t total_size() const; size_t size_k_bytes() const; @@ -500,3 +570,67 @@ private: bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1); bool state_read_data(llama_io_read_i & io, uint32_t cell_count); }; + +class llama_kv_cache_recurrent_state : public llama_memory_state_i { +public: + // used for errors + llama_kv_cache_recurrent_state(llama_memory_status status); + + // used to create a full-cache state + llama_kv_cache_recurrent_state( + llama_memory_status status, + llama_kv_cache_recurrent * kv); + + // used to create a state from a batch + llama_kv_cache_recurrent_state( + llama_memory_status status, + llama_kv_cache_recurrent * kv, + llama_sbatch sbatch, + std::vector ubatches); + + virtual ~llama_kv_cache_recurrent_state(); + + // + // llama_memory_state_i + // + + bool next() override; + bool apply() override; + + std::vector & out_ids() override; + + llama_memory_status get_status() const override; + const llama_ubatch & get_ubatch() const override; + + // + // llama_kv_cache_recurrent_state specific API + // + + uint32_t get_n_kv() const; + uint32_t get_head() const; + uint32_t get_size() const; + + ggml_tensor * get_k_l(int32_t il) const; + ggml_tensor * get_v_l(int32_t il) const; + + int32_t s_copy(int i) const; + float s_mask(int i) const; + +private: + const llama_memory_status status; + + llama_kv_cache_recurrent * kv; + + llama_sbatch sbatch; + + size_t i_next = 0; + + std::vector ubatches; + + // + // data needed for building the compute graph for the current ubatch: + // TODO: extract all the state like `head` and `n` here + // + + const bool is_full = false; +}; diff --git a/src/llama-kv-cells.h b/src/llama-kv-cells.h index dbbd03fcba..9e2c4d9276 100644 --- a/src/llama-kv-cells.h +++ b/src/llama-kv-cells.h @@ -68,12 +68,6 @@ public: // the index of the last cell that is used + 1 // return 0 if no cells are used uint32_t used_max_p1() const { -#if 0 - if (!seq_pos[0].empty()) printf("kv_cells: min[0] = %5d, max[0] = %5d\n", *seq_pos[0].begin(), *seq_pos[0].rbegin()); - if (!seq_pos[1].empty()) printf("kv_cells: min[1] = %5d, max[1] = %5d\n", *seq_pos[1].begin(), *seq_pos[1].rbegin()); - if (!seq_pos[2].empty()) printf("kv_cells: min[2] = %5d, max[2] = %5d\n", *seq_pos[2].begin(), *seq_pos[2].rbegin()); -#endif - return used.empty() ? 
0 : *used.rbegin() + 1; } @@ -144,6 +138,19 @@ public: } } + // clear a non-empty cell + void rm(uint32_t i) { + assert(i < pos.size()); + assert(pos[i] != -1); + + seq_pos_rm(i); + + pos[i] = -1; + seq[i].reset(); + + used.erase(i); + } + // note: call only if the cell has seq_id // return true if the cell becomes empty bool seq_rm(uint32_t i, llama_seq_id seq_id) { @@ -196,6 +203,15 @@ public: return false; } + // number of different sequences in the cell + int seq_count(uint32_t i) const { + assert(i < pos.size()); + assert(pos[i] != -1); + + return seq[i].count(); + } + + // check if the cell contains seq_id bool seq_has(uint32_t i, llama_seq_id seq_id) const { assert(i < pos.size()); assert(seq_id >= 0); @@ -213,6 +229,20 @@ public: seq_pos[seq_id].insert(pos[i]); } + // return the sequence id of this cell + // note: call only for cells with exactly one sequence + llama_seq_id seq_get(uint32_t i) const { + assert(seq[i].count() == 1); + + for (int s = 0; s < LLAMA_MAX_PARALLEL_SEQUENCES; ++s) { + if (seq[i].test(s)) { + return s; + } + } + + return -1; + } + // the minimum position of sequence seq_id currently present in any of the cells // return -1 if the sequence is not present llama_pos seq_pos_min(llama_seq_id seq_id) const { @@ -268,6 +298,7 @@ public: void pos_set(uint32_t i, llama_pos p) { assert(i < pos.size()); assert(pos[i] == -1); + assert(seq[i].none()); pos[i] = p; diff --git a/src/llama-memory.h b/src/llama-memory.h index a2d250434a..b3799d66e8 100644 --- a/src/llama-memory.h +++ b/src/llama-memory.h @@ -2,6 +2,11 @@ #include "llama.h" +#include +#include + +struct llama_ubatch; + struct llama_memory_params { // kv cache ggml_type type_k; @@ -30,3 +35,42 @@ public: virtual bool get_can_edit() const = 0; }; + +enum llama_memory_status { + LLAMA_MEMORY_STATUS_SUCCESS = 0, + LLAMA_MEMORY_STATUS_FAILED_PREPARE, + LLAMA_MEMORY_STATUS_FAILED_COMPUTE, +}; + +// the interface for managing the memory state during batch processing +// this interface is implemented per memory type. see: +// - llama_kv_cache_unified_state +// - llama_kv_cache_unified_iswa_state +// ... +// +// the only method that can mutate the memory and the memory state is llama_memory_i::apply() +// +// TODO: rename to llama_memory_context_i ? 
+class llama_memory_state_i {
+public:
+    virtual ~llama_memory_state_i() = default;
+
+    // consume the current ubatch from the state and proceed to the next one
+    // return false if we are done
+    virtual bool next() = 0;
+
+    // apply the memory state for the current ubatch to the memory object
+    // return false on failure
+    virtual bool apply() = 0;
+
+    // TODO: this might get reworked in the future when refactoring llama_batch
+    virtual std::vector<int64_t> & out_ids() = 0;
+
+    // get the current ubatch
+    virtual const llama_ubatch & get_ubatch() const = 0;
+
+    // get the status of the memory state
+    virtual llama_memory_status get_status() const = 0;
+};
+
+using llama_memory_state_ptr = std::unique_ptr<llama_memory_state_i>;
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index e99f5309f9..44c07ff457 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -683,6 +683,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
             ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
             ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
             ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
+            ml.get_arr_n(LLM_KV_CLASSIFIER_OUTPUT_LABELS, hparams.n_cls_out, false);
 
             switch (hparams.n_layer) {
                 case 3:
@@ -2113,7 +2114,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
             case LLM_ARCH_NOMIC_BERT_MOE:
                 {
                     tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-                    type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, 0);
+                    type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, TENSOR_NOT_REQUIRED);
 
                     if (arch == LLM_ARCH_BERT) {
                         pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, 0);
@@ -2121,8 +2122,8 @@
                         cls   = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, n_embd}, TENSOR_NOT_REQUIRED);
                         cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"),   {n_embd},         TENSOR_NOT_REQUIRED);
 
-                        cls_out   = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, 1}, TENSOR_NOT_REQUIRED);
-                        cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"),   {1},         TENSOR_NOT_REQUIRED);
+                        cls_out   = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED);
+                        cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"),   {hparams.n_cls_out},         TENSOR_NOT_REQUIRED);
                     }
 
                     tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
@@ -2131,7 +2132,10 @@
                     for (int i = 0; i < n_layer; ++i) {
                         auto & layer = layers[i];
 
-                        if (arch == LLM_ARCH_BERT) {
+                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
+                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa},         TENSOR_NOT_REQUIRED);
+
+                        if (!layer.wqkv) {
                             layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
                             layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i),   {n_embd},         0);
 
@@ -2140,12 +2144,6 @@
                             layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
                             layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i),   {n_embd_gqa},         0);
-                        } else {
-                            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
-                        }
-
-                        if (arch == LLM_ARCH_NOMIC_BERT_MOE) {
-                            layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd +
2*n_embd_gqa}, 0); } layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); @@ -5887,8 +5885,10 @@ struct llm_build_bert : public llm_graph_context { inpL = build_inp_embd(model.tok_embd); // token types are hardcoded to zero ("Sentence A") - ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0); - inpL = ggml_add(ctx0, inpL, type_row0); + if (model.type_embd) { + ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0); + inpL = ggml_add(ctx0, inpL, type_row0); + } if (model.arch == LLM_ARCH_BERT) { inpL = ggml_add(ctx0, ggml_get_rows(ctx0, model.pos_embd, inp_pos), inpL); } @@ -5909,36 +5909,11 @@ struct llm_build_bert : public llm_graph_context { ggml_tensor * Vcur; // self-attention - if (model.arch == LLM_ARCH_BERT || model.arch == LLM_ARCH_JINA_BERT_V2) { - Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq); - - if (model.layers[il].attn_q_norm) { - Qcur = build_norm(Qcur, - model.layers[il].attn_q_norm, - model.layers[il].attn_q_norm_b, - LLM_NORM, il); - } - - Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, cur), model.layers[il].bk); - - if (model.layers[il].attn_k_norm) { - Kcur = build_norm(Kcur, - model.layers[il].attn_k_norm, - model.layers[il].attn_k_norm_b, - LLM_NORM, il); - } - - Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, cur), model.layers[il].bv); - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - } else { - // compute Q and K and RoPE them + if (model.layers[il].wqkv) { cur = build_lora_mm(model.layers[il].wqkv, cur); cb(cur, "wqkv", il); - if (model.arch == LLM_ARCH_NOMIC_BERT_MOE) { + if (model.layers[il].bqkv) { cur = ggml_add(ctx0, cur, model.layers[il].bqkv); cb(cur, "bqkv", il); } @@ -5946,11 +5921,32 @@ struct llm_build_bert : public llm_graph_context { Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + } else { + Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq); + Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, cur), model.layers[il].bk); + Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, cur), model.layers[il].bv); + } - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + if (model.layers[il].attn_q_norm) { + Qcur = build_norm(Qcur, + model.layers[il].attn_q_norm, + model.layers[il].attn_q_norm_b, + LLM_NORM, il); + } + if (model.layers[il].attn_k_norm) { + Kcur = build_norm(Kcur, + model.layers[il].attn_k_norm, + model.layers[il].attn_k_norm_b, + LLM_NORM, il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + // RoPE + if (model.arch == LLM_ARCH_NOMIC_BERT || model.arch == LLM_ARCH_NOMIC_BERT_MOE) { Qcur = ggml_rope_ext( ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, @@ -8896,9 
+8892,9 @@ struct llm_build_mamba : public llm_graph_context { ggml_tensor * state_mask, const llama_ubatch & ubatch, int il) const { - const llama_kv_cache_recurrent * kv_self = static_cast(memory); + const auto * kv_state = static_cast(mstate); - const auto kv_head = kv_self->head; + const auto kv_head = kv_state->get_head(); const int64_t d_conv = hparams.ssm_d_conv; const int64_t d_inner = hparams.ssm_d_inner; @@ -8916,8 +8912,8 @@ struct llm_build_mamba : public llm_graph_context { GGML_ASSERT(ubatch.equal_seqs); GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs); - ggml_tensor * conv_states_all = kv_self->k_l[il]; - ggml_tensor * ssm_states_all = kv_self->v_l[il]; + ggml_tensor * conv_states_all = kv_state->get_k_l(il); + ggml_tensor * ssm_states_all = kv_state->get_v_l(il); // (ab)using the KV cache to store the states ggml_tensor * conv = build_copy_mask_state( @@ -11644,7 +11640,7 @@ struct llm_build_rwkv6_base : public llm_graph_context { ggml_tensor * state_mask, const llama_ubatch & ubatch, int il) const { - const llama_kv_cache_recurrent * kv_self = static_cast(memory); + const auto * kv_state = static_cast(mstate); const auto n_tokens = ubatch.n_tokens; const auto n_seqs = ubatch.n_seqs; @@ -11654,7 +11650,7 @@ struct llm_build_rwkv6_base : public llm_graph_context { const auto n_head = n_embd / head_size; const auto n_head_kv = hparams.n_head_kv(il); - const auto kv_head = kv_self->head; + const auto kv_head = kv_state->get_head(); const auto & layer = model.layers[il]; @@ -11766,7 +11762,7 @@ struct llm_build_rwkv6_base : public llm_graph_context { } ggml_tensor * wkv_state = build_copy_mask_state( - gf, kv_self->v_l[il], state_copy, state_mask, + gf, kv_state->get_v_l(il), state_copy, state_mask, hparams.n_embd_v_s(), n_seqs); ggml_tensor * wkv_output; @@ -11785,9 +11781,9 @@ struct llm_build_rwkv6_base : public llm_graph_context { wkv_state, ggml_view_1d( ctx0, - kv_self->v_l[il], + kv_state->get_v_l(il), hparams.n_embd_v_s() * n_seqs, - hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self->v_l[il]) + hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_state->get_v_l(il)) ) ) ); @@ -12040,7 +12036,7 @@ struct llm_build_rwkv7_base : public llm_graph_context { ggml_tensor *& first_layer_value, const llama_ubatch & ubatch, int il) const { - const llama_kv_cache_recurrent * kv_self = static_cast(memory); + const auto * kv_state = static_cast(mstate); const auto n_tokens = ubatch.n_tokens; const auto n_seqs = ubatch.n_seqs; @@ -12049,7 +12045,7 @@ struct llm_build_rwkv7_base : public llm_graph_context { const auto head_count = n_embd / head_size; const auto n_seq_tokens = ubatch.n_seq_tokens; - const auto kv_head = kv_self->head; + const auto kv_head = kv_state->get_head(); const auto & layer = model.layers[il]; @@ -12120,7 +12116,7 @@ struct llm_build_rwkv7_base : public llm_graph_context { a = ggml_reshape_3d(ctx0, a, head_size, head_count, n_tokens); ggml_tensor * wkv_state = build_copy_mask_state( - gf, kv_self->v_l[il], state_copy, state_mask, + gf, kv_state->get_v_l(il), state_copy, state_mask, hparams.n_embd_v_s(), n_seqs); ggml_tensor * wkv_output = ggml_rwkv_wkv7(ctx0, r, w, k, v, ggml_neg(ctx0, kk), ggml_mul(ctx0, kk, a), wkv_state); @@ -12134,9 +12130,9 @@ struct llm_build_rwkv7_base : public llm_graph_context { wkv_state, ggml_view_1d( ctx0, - kv_self->v_l[il], + kv_state->get_v_l(il), hparams.n_embd_v_s() * n_seqs, - hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self->v_l[il]) + hparams.n_embd_v_s() * kv_head * 
ggml_element_size(kv_state->get_v_l(il)) ) ) ); @@ -13234,7 +13230,7 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params, params.swa_full, cparams.n_ctx, cparams.n_seq_max, - cparams.n_batch, + cparams.n_ubatch, padding); } else { GGML_ASSERT(!hparams.is_swa_any()); @@ -13266,7 +13262,6 @@ llm_graph_result_ptr llama_model::build_graph( switch (arch) { case LLM_ARCH_LLAMA: - case LLM_ARCH_MINICPM: { llm = std::make_unique(*this, params, gf); } break; @@ -13507,6 +13502,7 @@ llm_graph_result_ptr llama_model::build_graph( } break; case LLM_ARCH_GRANITE: case LLM_ARCH_GRANITE_MOE: + case LLM_ARCH_MINICPM: { llm = std::make_unique(*this, params, gf); } break; @@ -13597,6 +13593,10 @@ int32_t llama_model_n_head_kv(const llama_model * model) { return model->hparams.n_head_kv(); } +int32_t llama_model_n_swa(const llama_model * model) { + return model->hparams.n_swa; +} + // deprecated int32_t llama_n_ctx_train(const llama_model * model) { return llama_model_n_ctx_train(model); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 7071feef98..c22cdeb209 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -92,7 +92,6 @@ llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2 ARGS ${CMAKE llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf) llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf) llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf) -llama_test(test-tokenizer-0 NAME test-tokenizer-0-nomic-bert-moe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-nomic-bert-moe.gguf) llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-phi-3.gguf) llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-qwen2.gguf) llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf) diff --git a/tests/test-chat-parser.cpp b/tests/test-chat-parser.cpp index 2113a12840..59e44e07d2 100644 --- a/tests/test-chat-parser.cpp +++ b/tests/test-chat-parser.cpp @@ -7,7 +7,6 @@ // #include #include -#include #include #include "chat-parser.h" @@ -15,8 +14,6 @@ #include "log.h" #include "regex-partial.h" -using json = nlohmann::ordered_json; - template static void assert_equals(const T & expected, const T & actual) { if (expected != actual) { diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp index 95d5169917..1c98079217 100644 --- a/tests/test-chat.cpp +++ b/tests/test-chat.cpp @@ -5,16 +5,17 @@ // // cmake -B build && cmake --build build --parallel && ./build/bin/test-chat ../minja/build/tests/*.jinja 2>/dev/null // -#include -#include -#include -#include - #include "chat.h" #include "../src/unicode.h" #include "../src/llama-grammar.h" +#include + +#include +#include +#include + using json = nlohmann::ordered_json; static std::ostream & operator<<(std::ostream & os, const common_chat_msg_diff & diff) { @@ -1040,6 +1041,15 @@ static void test_template_output_parsers() { "\n" "{\"name\": \"python\", \"arguments\": {\"code\":\"# This is a program:\\nprint('hey')\"}}\n" ""); + assert_msg_equals( + simple_assist_msg("", /* reasoning_content= */ "nah uhg"), + common_chat_parse( + "nah uhg", + /* is_partial= */ false, + { + /* .format = */ COMMON_CHAT_FORMAT_HERMES_2_PRO, + /* 
.reasoning_format = */ COMMON_REASONING_FORMAT_DEEPSEEK, + })); } { auto tmpls = read_templates("models/templates/meta-llama-Llama-3.1-8B-Instruct.jinja"); diff --git a/tests/test-grammar-integration.cpp b/tests/test-grammar-integration.cpp index 8988c347e3..6d64f07376 100644 --- a/tests/test-grammar-integration.cpp +++ b/tests/test-grammar-integration.cpp @@ -7,6 +7,8 @@ #include "../src/unicode.h" #include "../src/llama-grammar.h" +#include + #include #include #include diff --git a/tests/test-json-schema-to-grammar.cpp b/tests/test-json-schema-to-grammar.cpp index 38cf01d6d8..78ee55e246 100755 --- a/tests/test-json-schema-to-grammar.cpp +++ b/tests/test-json-schema-to-grammar.cpp @@ -6,6 +6,8 @@ #include "../src/llama-grammar.h" +#include + #include #include #include diff --git a/tools/mtmd/CMakeLists.txt b/tools/mtmd/CMakeLists.txt index c3024cec18..4baa15b960 100644 --- a/tools/mtmd/CMakeLists.txt +++ b/tools/mtmd/CMakeLists.txt @@ -1,48 +1,50 @@ # mtmd -# compile mtmd-audio separately to avoid long compile times with miniaudio.h -# TODO @ngxson : move miniaudio.h and stb_image.h to mtmd-helper.cpp, then compile the helper as a separate library -add_library(mtmd_audio STATIC mtmd-audio.cpp mtmd-audio.h) -if (BUILD_SHARED_LIBS) - set_target_properties(mtmd_audio PROPERTIES POSITION_INDEPENDENT_CODE ON) -endif() -target_link_libraries(mtmd_audio PRIVATE ggml ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(mtmd_audio PRIVATE cxx_std_17) -target_include_directories(mtmd_audio PRIVATE .) +find_package(Threads REQUIRED) -add_library(mtmd OBJECT +add_library(mtmd mtmd.cpp - mtmd-helper.cpp + mtmd-audio.cpp mtmd.h clip.cpp clip.h clip-impl.h + mtmd-helper.cpp + mtmd-helper.h ) -target_link_libraries(mtmd PRIVATE ggml llama mtmd_audio ${CMAKE_THREAD_LIBS_INIT}) - -target_include_directories(mtmd PUBLIC .) +target_link_libraries (mtmd PUBLIC ggml llama) +target_link_libraries (mtmd PRIVATE Threads::Threads) +target_include_directories(mtmd PUBLIC .) target_include_directories(mtmd PRIVATE ../..) 
-target_include_directories(mtmd PRIVATE ../../common) # for stb_image.h +target_include_directories(mtmd PRIVATE ../../vendor) +target_compile_features (mtmd PRIVATE cxx_std_17) -target_compile_features(mtmd PRIVATE cxx_std_17) - -add_library(mtmd_static STATIC $) if (BUILD_SHARED_LIBS) - set_target_properties(mtmd PROPERTIES POSITION_INDEPENDENT_CODE ON) - target_compile_definitions(mtmd PRIVATE LLAMA_SHARED LLAMA_BUILD) - add_library(mtmd_shared SHARED $) - target_link_libraries(mtmd_shared PRIVATE ggml llama mtmd_audio ${CMAKE_THREAD_LIBS_INIT}) - install(TARGETS mtmd_shared LIBRARY) + set_target_properties (mtmd PROPERTIES POSITION_INDEPENDENT_CODE ON) + target_compile_definitions(mtmd PRIVATE LLAMA_BUILD) + target_compile_definitions(mtmd PUBLIC LLAMA_SHARED) endif() +set(MTMD_PUBLIC_HEADERS + ${CMAKE_CURRENT_SOURCE_DIR}/mtmd.h + ${CMAKE_CURRENT_SOURCE_DIR}/mtmd-helper.h + ) + +set_target_properties(mtmd + PROPERTIES + PUBLIC_HEADER "${MTMD_PUBLIC_HEADERS}") + +install(TARGETS mtmd LIBRARY PUBLIC_HEADER) + if (NOT MSVC) - target_compile_options(mtmd PRIVATE -Wno-cast-qual) # stb_image.h - target_compile_options(mtmd_audio PRIVATE -Wno-cast-qual) # miniaudio.h + # for stb_image.h and miniaudio.h + target_compile_options(mtmd PRIVATE -Wno-cast-qual) endif() -if(TARGET BUILD_INFO) - add_dependencies(mtmd BUILD_INFO) +if (TARGET BUILD_INFO) + add_dependencies(mtmd BUILD_INFO) + add_dependencies(mtmd-helper BUILD_INFO) endif() add_executable(llama-llava-cli deprecation-warning.cpp) @@ -51,8 +53,8 @@ add_executable(llama-minicpmv-cli deprecation-warning.cpp) add_executable(llama-qwen2vl-cli deprecation-warning.cpp) set(TARGET llama-mtmd-cli) -add_executable(${TARGET} mtmd-cli.cpp) -set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-mtmd-cli) -install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common mtmd ${CMAKE_THREAD_LIBS_INIT}) +add_executable (${TARGET} mtmd-cli.cpp) +set_target_properties (${TARGET} PROPERTIES OUTPUT_NAME llama-mtmd-cli) +install (TARGETS ${TARGET} RUNTIME) +target_link_libraries (${TARGET} PRIVATE common mtmd Threads::Threads) target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp index 6ae2c2ce46..c25bacc177 100644 --- a/tools/mtmd/clip.cpp +++ b/tools/mtmd/clip.cpp @@ -11,9 +11,6 @@ #include "ggml-backend.h" #include "gguf.h" -#define STB_IMAGE_IMPLEMENTATION -#include "stb_image.h" - #include #include #include @@ -2786,30 +2783,6 @@ void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny memcpy(img->buf.data(), rgb_pixels, img->buf.size()); } -bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) { - int nx, ny, nc; - auto * data = stbi_load(fname, &nx, &ny, &nc, 3); - if (!data) { - LOG_ERR("%s: failed to load image '%s'\n", __func__, fname); - return false; - } - clip_build_img_from_pixels(data, nx, ny, img); - stbi_image_free(data); - return true; -} - -bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img) { - int nx, ny, nc; - auto * data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3); - if (!data) { - LOG_ERR("%s: failed to decode image bytes\n", __func__); - return false; - } - clip_build_img_from_pixels(data, nx, ny, img); - stbi_image_free(data); - return true; -} - // Normalize image to float32 - careful with pytorch .to(model.device, dtype=torch.float16) - this sometimes reduces precision (32>16>32), sometimes not static void 
normalize_image_u8_to_f32(const clip_image_u8 & src, clip_image_f32 & dst, const float mean[3], const float std[3]) { dst.nx = src.nx; diff --git a/tools/mtmd/mtmd-audio.cpp b/tools/mtmd/mtmd-audio.cpp index ae06a695d9..4d053895cd 100644 --- a/tools/mtmd/mtmd-audio.cpp +++ b/tools/mtmd/mtmd-audio.cpp @@ -1,28 +1,5 @@ -// fix problem with std::min and std::max -#if defined(_WIN32) -#define WIN32_LEAN_AND_MEAN -#ifndef NOMINMAX -# define NOMINMAX -#endif -#include -#endif - #include "mtmd-audio.h" -//#define MTMD_AUDIO_DEBUG - -#define MINIAUDIO_IMPLEMENTATION -#ifndef MTMD_AUDIO_DEBUG -# define MA_NO_ENCODING -#endif -#define MA_NO_DEVICE_IO -#define MA_NO_RESOURCE_MANAGER -#define MA_NO_NODE_GRAPH -#define MA_NO_ENGINE -#define MA_NO_GENERATION -#define MA_API static -#include "miniaudio.h" - #define _USE_MATH_DEFINES // for M_PI #include #include @@ -359,69 +336,6 @@ bool preprocess_audio( } // namespace whisper_preprocessor -namespace audio_helpers { - -bool is_audio_file(const char * buf, size_t len) { - if (len < 12) { - return false; - } - - // RIFF ref: https://en.wikipedia.org/wiki/Resource_Interchange_File_Format - // WAV ref: https://www.mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html - bool is_wav = memcmp(buf, "RIFF", 4) == 0 && memcmp(buf + 8, "WAVE", 4) == 0; - bool is_mp3 = len >= 3 && ( - memcmp(buf, "ID3", 3) == 0 || - // Check for MPEG sync word (simplified check) - ((unsigned char)buf[0] == 0xFF && ((unsigned char)buf[1] & 0xE0) == 0xE0) - ); - bool is_flac = memcmp(buf, "fLaC", 4) == 0; - - return is_wav || is_mp3 || is_flac; -} - -// returns true if the buffer is a valid audio file -bool decode_audio_from_buf(const unsigned char * buf_in, size_t len, int target_sampler_rate, std::vector & pcmf32_mono) { - ma_result result; - const int channels = 1; - ma_decoder_config decoder_config = ma_decoder_config_init(ma_format_f32, channels, target_sampler_rate); - ma_decoder decoder; - - result = ma_decoder_init_memory(buf_in, len, &decoder_config, &decoder); - if (result != MA_SUCCESS) { - return false; - } - - ma_uint64 frame_count; - ma_uint64 frames_read; - result = ma_decoder_get_length_in_pcm_frames(&decoder, &frame_count); - if (result != MA_SUCCESS) { - ma_decoder_uninit(&decoder); - return false; - } - - pcmf32_mono.resize(frame_count); - result = ma_decoder_read_pcm_frames(&decoder, pcmf32_mono.data(), frame_count, &frames_read); - if (result != MA_SUCCESS) { - ma_decoder_uninit(&decoder); - return false; - } - -#ifdef MTMD_AUDIO_DEBUG - // save audio to wav file - ma_encoder_config config = ma_encoder_config_init(ma_encoding_format_wav, ma_format_f32, 1, target_sampler_rate); - ma_encoder encoder; - ma_encoder_init_file("output.wav", &config, &encoder); - ma_encoder_write_pcm_frames(&encoder, pcmf32_mono.data(), pcmf32_mono.size(), &frames_read); - ma_encoder_uninit(&encoder); -#endif - - ma_decoder_uninit(&decoder); - return true; -} - -} // namespace wav_utils - - // precalculated mel filter banks // values are multiplied by 1000.0 to save space, and will be divided by 1000.0 in the end of the function // diff --git a/tools/mtmd/mtmd-audio.h b/tools/mtmd/mtmd-audio.h index 348d11dcaf..b7b940affb 100644 --- a/tools/mtmd/mtmd-audio.h +++ b/tools/mtmd/mtmd-audio.h @@ -32,7 +32,7 @@ struct whisper_filters { std::vector data; }; -extern bool preprocess_audio( +bool preprocess_audio( const float * samples, size_t n_samples, const whisper_filters & filters, @@ -40,23 +40,8 @@ extern bool preprocess_audio( } // namespace whisper_preprocessor - -// TODO @ngxson : 
move this helper to mtmd-helpers.cpp -namespace audio_helpers { - -extern bool is_audio_file(const char * buf, size_t len); - -extern bool decode_audio_from_buf( - const unsigned char * buf_in, - size_t len, - int target_sampler_rate, - std::vector & pcmf32_mono); - -} // namespace audio_helpers - - namespace whisper_precalc_filters { -extern whisper_preprocessor::whisper_filters get_128_bins(); +whisper_preprocessor::whisper_filters get_128_bins(); } // namespace whisper_precalc_filters diff --git a/tools/mtmd/mtmd-cli.cpp b/tools/mtmd/mtmd-cli.cpp index a70f11ca9d..508a64c586 100644 --- a/tools/mtmd/mtmd-cli.cpp +++ b/tools/mtmd/mtmd-cli.cpp @@ -7,6 +7,7 @@ #include "console.h" #include "chat.h" #include "mtmd.h" +#include "mtmd-helper.h" #include #include @@ -143,7 +144,7 @@ struct mtmd_cli_context { } bool load_media(const std::string & fname) { - mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_file(fname.c_str())); + mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_file(ctx_vision.get(), fname.c_str())); if (!bmp.ptr) { return false; } diff --git a/tools/mtmd/mtmd-helper.cpp b/tools/mtmd/mtmd-helper.cpp index e6c926080c..64f03fd1e7 100644 --- a/tools/mtmd/mtmd-helper.cpp +++ b/tools/mtmd/mtmd-helper.cpp @@ -1,10 +1,37 @@ +// fix problem with std::min and std::max +#if defined(_WIN32) +#define WIN32_LEAN_AND_MEAN +#ifndef NOMINMAX +# define NOMINMAX +#endif +#include +#endif + #include "mtmd.h" +#include "mtmd-helper.h" #include "llama.h" #include #include #include +//#define MTMD_AUDIO_DEBUG + +#define MINIAUDIO_IMPLEMENTATION +#ifndef MTMD_AUDIO_DEBUG +# define MA_NO_ENCODING +#endif +#define MA_NO_DEVICE_IO +#define MA_NO_RESOURCE_MANAGER +#define MA_NO_NODE_GRAPH +#define MA_NO_ENGINE +#define MA_NO_GENERATION +#define MA_API static +#include "miniaudio/miniaudio.h" + +#define STB_IMAGE_IMPLEMENTATION +#include "stb/stb_image.h" + #define LOG_INF(...) fprintf(stdout, __VA_ARGS__) #define LOG_ERR(...) 
fprintf(stderr, __VA_ARGS__) @@ -315,3 +342,118 @@ int32_t mtmd_helper_eval_chunks(mtmd_context * ctx, return 0; } + +namespace audio_helpers { + +static bool is_audio_file(const char * buf, size_t len) { + if (len < 12) { + return false; + } + + // RIFF ref: https://en.wikipedia.org/wiki/Resource_Interchange_File_Format + // WAV ref: https://www.mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html + bool is_wav = memcmp(buf, "RIFF", 4) == 0 && memcmp(buf + 8, "WAVE", 4) == 0; + bool is_mp3 = len >= 3 && ( + memcmp(buf, "ID3", 3) == 0 || + // Check for MPEG sync word (simplified check) + ((unsigned char)buf[0] == 0xFF && ((unsigned char)buf[1] & 0xE0) == 0xE0) + ); + bool is_flac = memcmp(buf, "fLaC", 4) == 0; + + return is_wav || is_mp3 || is_flac; +} + +// returns true if the buffer is a valid audio file +static bool decode_audio_from_buf(const unsigned char * buf_in, size_t len, int target_sampler_rate, std::vector & pcmf32_mono) { + ma_result result; + const int channels = 1; + ma_decoder_config decoder_config = ma_decoder_config_init(ma_format_f32, channels, target_sampler_rate); + ma_decoder decoder; + + result = ma_decoder_init_memory(buf_in, len, &decoder_config, &decoder); + if (result != MA_SUCCESS) { + return false; + } + + ma_uint64 frame_count; + ma_uint64 frames_read; + result = ma_decoder_get_length_in_pcm_frames(&decoder, &frame_count); + if (result != MA_SUCCESS) { + ma_decoder_uninit(&decoder); + return false; + } + + pcmf32_mono.resize(frame_count); + result = ma_decoder_read_pcm_frames(&decoder, pcmf32_mono.data(), frame_count, &frames_read); + if (result != MA_SUCCESS) { + ma_decoder_uninit(&decoder); + return false; + } + +#ifdef MTMD_AUDIO_DEBUG + // save audio to wav file + ma_encoder_config config = ma_encoder_config_init(ma_encoding_format_wav, ma_format_f32, 1, target_sampler_rate); + ma_encoder encoder; + ma_encoder_init_file("output.wav", &config, &encoder); + ma_encoder_write_pcm_frames(&encoder, pcmf32_mono.data(), pcmf32_mono.size(), &frames_read); + ma_encoder_uninit(&encoder); +#endif + + ma_decoder_uninit(&decoder); + return true; +} + +} // namespace audio_helpers + +mtmd_bitmap * mtmd_helper_bitmap_init_from_buf(mtmd_context * ctx, const unsigned char * buf, size_t len) { + if (audio_helpers::is_audio_file((const char *)buf, len)) { + std::vector pcmf32; + int bitrate = mtmd_get_audio_bitrate(ctx); + if (bitrate < 0) { + LOG_ERR("This model does not support audio input\n"); + return nullptr; + } + if (!audio_helpers::decode_audio_from_buf(buf, len, bitrate, pcmf32)) { + LOG_ERR("Unable to read WAV audio file from buffer\n"); + return nullptr; + } + return mtmd_bitmap_init_from_audio(pcmf32.size(), pcmf32.data()); + } + + // otherwise, we assume it's an image + mtmd_bitmap * result = nullptr; + { + int nx, ny, nc; + auto * data = stbi_load_from_memory(buf, len, &nx, &ny, &nc, 3); + if (!data) { + LOG_ERR("%s: failed to decode image bytes\n", __func__); + return nullptr; + } + result = mtmd_bitmap_init(nx, ny, data); + stbi_image_free(data); + } + return result; +} + +mtmd_bitmap * mtmd_helper_bitmap_init_from_file(mtmd_context * ctx, const char * fname) { + std::vector buf; + FILE * f = fopen(fname, "rb"); + if (!f) { + LOG_ERR("Unable to open file %s: %s\n", fname, strerror(errno)); + return nullptr; + } + + fseek(f, 0, SEEK_END); + long file_size = ftell(f); + fseek(f, 0, SEEK_SET); + buf.resize(file_size); + + size_t n_read = fread(buf.data(), 1, file_size, f); + fclose(f); + if (n_read != (size_t)file_size) { + LOG_ERR("Failed to read entire file 
%s", fname); + return nullptr; + } + + return mtmd_helper_bitmap_init_from_buf(ctx, buf.data(), buf.size()); +} diff --git a/tools/mtmd/mtmd-helper.h b/tools/mtmd/mtmd-helper.h new file mode 100644 index 0000000000..5c0edc6937 --- /dev/null +++ b/tools/mtmd/mtmd-helper.h @@ -0,0 +1,91 @@ +#ifndef MTMD_HELPER_H +#define MTMD_HELPER_H + +#include "ggml.h" +#include "llama.h" +#include "mtmd.h" + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +// +// libmtmd helper functions +// +// Please note that these helpers are not guaranteed to be stable. +// BREAKING CHANGES are expected. +// + +// helper function to construct a mtmd_bitmap from a file +// it calls mtmd_helper_bitmap_init_from_buf() internally +// returns nullptr on failure +// this function is thread-safe +MTMD_API mtmd_bitmap * mtmd_helper_bitmap_init_from_file(mtmd_context * ctx, const char * fname); + +// helper function to construct a mtmd_bitmap from a buffer containing a file +// supported formats: +// image: formats supported by stb_image: jpg, png, bmp, gif, etc. +// audio: formats supported by miniaudio: wav, mp3, flac +// note: audio files will be auto-detected based on magic bytes +// returns nullptr on failure +// this function is thread-safe +MTMD_API mtmd_bitmap * mtmd_helper_bitmap_init_from_buf(mtmd_context * ctx, const unsigned char * buf, size_t len); + +// helper to count the total number of tokens from a list of chunks, useful to keep track of KV cache +MTMD_API size_t mtmd_helper_get_n_tokens(const mtmd_input_chunks * chunks); + +// helper to count the total position of tokens from a list of chunks, useful to keep track of n_past +// normally, n_pos is equal to n_tokens, but for M-RoPE it is different +MTMD_API llama_pos mtmd_helper_get_n_pos(const mtmd_input_chunks * chunks); + +// helper function that automatically: +// 1. run llama_decode() on text chunks +// 2. run mtmd_encode() on image chunks, then mtmd_get_output_embd() and then llama_decode() +// if any of the mtmd_encode() or llama_decode() calls return non-zero, stop and forward the error +// otherwise, returns 0 on success +// this function is NOT thread-safe +MTMD_API int32_t mtmd_helper_eval_chunks(mtmd_context * ctx, + struct llama_context * lctx, + const mtmd_input_chunks * chunks, + llama_pos n_past, + llama_seq_id seq_id, + int32_t n_batch, + bool logits_last, + llama_pos * new_n_past); + +// works like mtmd_helper_eval_chunks(), but only for a single chunk +// this function is NOT thread-safe +MTMD_API int32_t mtmd_helper_eval_chunk_single(mtmd_context * ctx, + struct llama_context * lctx, + const mtmd_input_chunk * chunk, + llama_pos n_past, + llama_seq_id seq_id, + int32_t n_batch, + bool logits_last, + llama_pos * new_n_past); + +// helper function to decode an image whose embeddings have already been calculated +// this helper will handle batching and pre/post decoding setup (for ex. 
diff --git a/tools/mtmd/mtmd.cpp b/tools/mtmd/mtmd.cpp
index 52bf71e2c9..8573f11437 100644
--- a/tools/mtmd/mtmd.cpp
+++ b/tools/mtmd/mtmd.cpp
@@ -819,53 +819,12 @@ bool mtmd_support_audio(mtmd_context * ctx) {
     return ctx->ctx_a != nullptr;
 }
 
-// these 2 helpers below use internal clip_image_u8_ptr,
-// so unfortunately they cannot moved to mtmd-helper.h
-// however, in theory, user can decode image file to bitmap using
-// whichever library they want, and then use mtmd_bitmap_init() to create bitmap
-
-mtmd_bitmap * mtmd_helper_bitmap_init_from_buf(const unsigned char * buf, size_t len) {
-    if (audio_helpers::is_audio_file((const char *)buf, len)) {
-        std::vector<float> pcmf32;
-        if (!audio_helpers::decode_audio_from_buf(buf, len, COMMON_SAMPLE_RATE, pcmf32)) {
-            LOG_ERR("Unable to read WAV audio file from buffer\n");
-            return nullptr;
-        }
-        return mtmd_bitmap_init_from_audio(pcmf32.size(), pcmf32.data());
+int mtmd_get_audio_bitrate(mtmd_context * ctx) {
+    if (!ctx->ctx_a) {
+        return -1;
     }
-
-    clip_image_u8_ptr img_u8(clip_image_u8_init());
-    bool ok = clip_image_load_from_bytes(buf, len, img_u8.get());
-    if (!ok) {
-        LOG_ERR("Unable to load image from buffer\n");
-        return nullptr;
-    }
-    uint32_t nx, ny;
-    unsigned char * data = clip_image_u8_get_data(img_u8.get(), &nx, &ny);
-    return mtmd_bitmap_init(nx, ny, data);
-}
-
-mtmd_bitmap * mtmd_helper_bitmap_init_from_file(const char * fname) {
-    std::vector<unsigned char> buf;
-    FILE * f = fopen(fname, "rb");
-    if (!f) {
-        LOG_ERR("Unable to open file %s: %s\n", fname, strerror(errno));
-        return nullptr;
-    }
-
-    fseek(f, 0, SEEK_END);
-    long file_size = ftell(f);
-    fseek(f, 0, SEEK_SET);
-    buf.resize(file_size);
-
-    size_t n_read = fread(buf.data(), 1, file_size, f);
-    fclose(f);
-    if (n_read != (size_t)file_size) {
-        LOG_ERR("Failed to read entire file %s", fname);
-        return nullptr;
-    }
-
-    return mtmd_helper_bitmap_init_from_buf(buf.data(), buf.size());
+    // for now, we assume that all audio models have the same bitrate
+    return 16000; // 16kHz
 }
 
 //
diff --git a/tools/mtmd/mtmd.h b/tools/mtmd/mtmd.h
index b53f215a2f..f4ea07d3ad 100644
--- a/tools/mtmd/mtmd.h
+++ b/tools/mtmd/mtmd.h
@@ -3,7 +3,6 @@
 
 #include "ggml.h"
 #include "llama.h"
-#include "clip.h"
 
 #include <stddef.h>
 #include <stdint.h>
@@ -109,6 +108,10 @@ MTMD_API bool mtmd_support_vision(mtmd_context * ctx);
 // whether the current model supports audio input
 MTMD_API bool mtmd_support_audio(mtmd_context * ctx);
 
+// get audio bitrate in Hz, for example 16000 for Whisper
+// return -1 if audio is not supported
+MTMD_API int mtmd_get_audio_bitrate(mtmd_context * ctx);
+
 // mtmd_bitmap
 //
 // if bitmap is image:
@@ -209,75 +212,6 @@ MTMD_API float * mtmd_get_output_embd(mtmd_context * ctx);
 
 /////////////////////////////////////////
 
-//
-// Helper functions (can be implemented based on other functions)
-//
-// Please note that these helpers are not guaranteed to be stable.
-// BREAKING CHANGES are expected.
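
// Not part of the patch: callers can gate audio inputs on the accessor added
// above (`mctx` is an assumed, initialized mtmd_context):
//
//     const int rate = mtmd_get_audio_bitrate(mctx); // sampling rate in Hz, e.g. 16000
//     if (rate < 0) {
//         // the loaded model has no audio encoder; reject audio inputs up front
//     }
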
-// - -// helper function to construct a mtmd_bitmap from a file -// it calls mtmd_helper_bitmap_init_from_buf() internally -// returns nullptr on failure -// this function is thread-safe -MTMD_API mtmd_bitmap * mtmd_helper_bitmap_init_from_file(const char * fname); - -// helper function to construct a mtmd_bitmap from a buffer containing a file -// supported formats: -// image: formats supported by stb_image: jpg, png, bmp, gif, etc. -// audio: formats supported by miniaudio: wav, mp3, flac -// note: audio files will be auto-detected based on magic bytes -// returns nullptr on failure -// this function is thread-safe -MTMD_API mtmd_bitmap * mtmd_helper_bitmap_init_from_buf(const unsigned char * buf, size_t len); - -// helper to count the total number of tokens from a list of chunks, useful to keep track of KV cache -MTMD_API size_t mtmd_helper_get_n_tokens(const mtmd_input_chunks * chunks); - -// helper to count the total position of tokens from a list of chunks, useful to keep track of n_past -// normally, n_pos is equal to n_tokens, but for M-RoPE it is different -MTMD_API llama_pos mtmd_helper_get_n_pos(const mtmd_input_chunks * chunks); - -// helper function that automatically: -// 1. run llama_decode() on text chunks -// 2. run mtmd_encode() on image chunks, then mtmd_get_output_embd() and then llama_decode() -// if any of the mtmd_encode() or llama_decode() calls return non-zero, stop and forward the error -// otherwise, returns 0 on success -// this function is NOT thread-safe -MTMD_API int32_t mtmd_helper_eval_chunks(mtmd_context * ctx, - struct llama_context * lctx, - const mtmd_input_chunks * chunks, - llama_pos n_past, - llama_seq_id seq_id, - int32_t n_batch, - bool logits_last, - llama_pos * new_n_past); - -// works like mtmd_helper_eval_chunks(), but only for a single chunk -// this function is NOT thread-safe -MTMD_API int32_t mtmd_helper_eval_chunk_single(mtmd_context * ctx, - struct llama_context * lctx, - const mtmd_input_chunk * chunk, - llama_pos n_past, - llama_seq_id seq_id, - int32_t n_batch, - bool logits_last, - llama_pos * new_n_past); - -// helper function to decode an image whose embeddings have already been calculated -// this helper will handle batching and pre/post decoding setup (for ex. 
gemma 3 requires non-causal attention) -// ret 0 on success, -1 on chunk not being a valid image chunk, 1 on decode failure -MTMD_API int32_t mtmd_helper_decode_image_chunk(mtmd_context * ctx, - struct llama_context * lctx, - const mtmd_input_chunk * chunk, - float * encoded_embd, - llama_pos n_past, - llama_seq_id seq_id, - int32_t n_batch, - llama_pos * new_n_past); - -///////////////////////////////////////// - // test function, to be used in test-mtmd-c-api.c MTMD_API mtmd_input_chunks * mtmd_test_create_input_chunks(void); diff --git a/tools/run/run.cpp b/tools/run/run.cpp index 702c307d8b..4aef93863c 100644 --- a/tools/run/run.cpp +++ b/tools/run/run.cpp @@ -1,3 +1,13 @@ +#include "chat.h" +#include "common.h" +#include "llama-cpp.h" +#include "log.h" + +#include "linenoise.cpp/linenoise.h" + +#define JSON_ASSERT GGML_ASSERT +#include + #if defined(_WIN32) # include # include @@ -24,13 +34,6 @@ #include #include -#include "chat.h" -#include "common.h" -#include "json.hpp" -#include "linenoise.cpp/linenoise.h" -#include "llama-cpp.h" -#include "log.h" - #if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) || defined(_WIN32) [[noreturn]] static void sigint_handler(int) { printf("\n" LOG_COL_DEFAULT); diff --git a/tools/server/CMakeLists.txt b/tools/server/CMakeLists.txt index 17109fddbd..c2a56aaa7f 100644 --- a/tools/server/CMakeLists.txt +++ b/tools/server/CMakeLists.txt @@ -12,7 +12,6 @@ endif() set(TARGET_SRCS server.cpp utils.hpp - httplib.h ) set(PUBLIC_ASSETS index.html.gz diff --git a/tools/server/public/index.html.gz b/tools/server/public/index.html.gz index 8d4e392ff3..f8e3043421 100644 Binary files a/tools/server/public/index.html.gz and b/tools/server/public/index.html.gz differ diff --git a/tools/server/server.cpp b/tools/server/server.cpp index fe6c685ec7..4b92eeac94 100644 --- a/tools/server/server.cpp +++ b/tools/server/server.cpp @@ -9,10 +9,8 @@ #include "sampling.h" #include "speculative.h" #include "mtmd.h" +#include "mtmd-helper.h" -// Change JSON_ASSERT from assert() to GGML_ASSERT: -#define JSON_ASSERT GGML_ASSERT -#include "json.hpp" // mime type for sending response #define MIMETYPE_JSON "application/json; charset=utf-8" @@ -2018,11 +2016,6 @@ struct server_context { params_base.n_cache_reuse = 0; SRV_WRN("%s\n", "cache_reuse is not supported by this context, it will be disabled"); } - - if (!params_base.speculative.model.path.empty()) { - SRV_ERR("%s\n", "err: speculative decode is not supported by this context"); - return false; - } } return true; @@ -3216,9 +3209,18 @@ struct server_context { } if (slot.n_past > 0 && slot.n_past < (int) slot.cache_tokens.size()) { - if (llama_kv_self_seq_pos_min(ctx, slot.id) > 0) { + const auto pos_min = llama_kv_self_seq_pos_min(ctx, slot.id); + if (pos_min == -1) { + SLT_ERR(slot, "n_past = %d, cache_tokens.size() = %d, seq_id = %d, pos_min = %d\n", slot.n_past, (int) slot.cache_tokens.size(), slot.id, pos_min); + GGML_ABORT("pos_min == -1, but n_past > 0 - should not happen: https://github.com/ggml-org/llama.cpp/pull/13833#discussion_r2116181237"); + } + + const auto n_swa = llama_model_n_swa(model); + if (pos_min > slot.n_past - n_swa) { + SLT_WRN(slot, "n_past = %d, cache_tokens.size() = %d, seq_id = %d, pos_min = %d, n_swa = %d\n", slot.n_past, (int) slot.cache_tokens.size(), slot.id, pos_min, n_swa); SLT_WRN(slot, "forcing full prompt re-processing due to lack of cache data (likely due to SWA, see %s)\n", "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055"); + 
llama_kv_self_seq_rm(ctx, slot.id, 0, -1); slot.n_past = 0; } } @@ -3381,8 +3383,10 @@ struct server_context { } } + int32_t i_next = 0; + // process the created batch of tokens - for (int32_t i = 0; i < batch.n_tokens; i += n_batch) { + for (int32_t i = 0; i < batch.n_tokens; i = i_next) { const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i); llama_batch batch_view = { @@ -3427,13 +3431,18 @@ struct server_context { // retry with half the batch size to try to find a free slot in the KV cache n_batch /= 2; - i -= n_batch; - SRV_WRN("failed to find free space in the KV cache, retrying with smaller batch size - try increasing it via the context size or enable defragmentation, i = %d, n_batch = %d, ret = %d\n", i, n_batch, ret); + SRV_WRN("failed to find free space in the KV cache, retrying with smaller batch size, i = %d, n_batch = %d, ret = %d\n", i, n_batch, ret); continue; // continue loop of n_batch } + // move the head of the batch forward with the number of tokens we just processed + i_next = i + n_tokens; + + // on successful decode, restore the original batch size + n_batch = llama_n_batch(ctx); + for (auto & slot : slots) { if (slot.i_batch < (int) i || slot.i_batch >= (int) (i + n_tokens)) { continue; // continue loop of slots @@ -4187,7 +4196,7 @@ int main(int argc, char ** argv) { throw std::runtime_error("This server does not support multimodal"); } for (auto & file : files) { - mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(file.data(), file.size())); + mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(ctx_server.mctx, file.data(), file.size())); if (!bmp.ptr) { throw std::runtime_error("Failed to load image or audio file"); } diff --git a/tools/server/utils.hpp b/tools/server/utils.hpp index 8456a02e6f..f3e0392a4e 100644 --- a/tools/server/utils.hpp +++ b/tools/server/utils.hpp @@ -6,17 +6,17 @@ #include "arg.h" // common_remote_get_content #include "base64.hpp" #include "mtmd.h" +#include "mtmd-helper.h" +#include "chat.h" // increase max payload length to allow use of larger context size #define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576 // disable Nagle's algorithm #define CPPHTTPLIB_TCP_NODELAY true -#include "httplib.h" +#include -// Change JSON_ASSERT from assert() to GGML_ASSERT: #define JSON_ASSERT GGML_ASSERT -#include "json.hpp" -#include "chat.h" +#include #include #include @@ -264,13 +264,19 @@ static size_t validate_utf8(const std::string& text) { static llama_tokens format_rerank(const struct llama_vocab * vocab, const llama_tokens & query, const llama_tokens & doc) { llama_tokens result; + // Get EOS token - use SEP token as fallback if EOS is not available + llama_token eos_token = llama_vocab_eos(vocab); + if (eos_token == LLAMA_TOKEN_NULL) { + eos_token = llama_vocab_sep(vocab); + } + result.reserve(doc.size() + query.size() + 4); result.push_back(llama_vocab_bos(vocab)); result.insert(result.end(), query.begin(), query.end()); - result.push_back(llama_vocab_eos(vocab)); + result.push_back(eos_token); result.push_back(llama_vocab_sep(vocab)); result.insert(result.end(), doc.begin(), doc.end()); - result.push_back(llama_vocab_eos(vocab)); + result.push_back(eos_token); return result; } @@ -573,7 +579,7 @@ struct oaicompat_parser_options { // used by /chat/completions endpoint static json oaicompat_chat_params_parse( - const json & body, /* openai api json semantics */ + json & body, /* openai api json semantics */ const oaicompat_parser_options & opt, std::vector & out_files) { @@ -624,7 +630,7 @@ static json 
oaicompat_chat_params_parse( if (!body.contains("messages")) { throw std::runtime_error("'messages' is required"); } - json messages = body.at("messages"); + json & messages = body.at("messages"); if (!messages.is_array()) { throw std::runtime_error("Expected 'messages' to be an array"); } diff --git a/tools/server/webui/src/App.tsx b/tools/server/webui/src/App.tsx index 1b673bbaa1..02f1719d3d 100644 --- a/tools/server/webui/src/App.tsx +++ b/tools/server/webui/src/App.tsx @@ -5,21 +5,24 @@ import { AppContextProvider, useAppContext } from './utils/app.context'; import ChatScreen from './components/ChatScreen'; import SettingDialog from './components/SettingDialog'; import { Toaster } from 'react-hot-toast'; +import { ModalProvider } from './components/ModalProvider'; function App() { return ( - -
<HashRouter>
-      <div className="flex flex-row drawer lg:drawer-open">
-        <AppContextProvider>
-          <Routes>
-            <Route element={<AppLayout />}>
-              <Route path="/chat/:convId" element={<ChatScreen />} />
-              <Route path="*" element={<ChatScreen />} />
-            </Route>
-          </Routes>
-        </AppContextProvider>
-      </div>
-    </HashRouter>
+    <ModalProvider>
+      <HashRouter>
+        <div className="flex flex-row drawer lg:drawer-open">
+          <AppContextProvider>
+            <Routes>
+              <Route element={<AppLayout />}>
+                <Route path="/chat/:convId" element={<ChatScreen />} />
+                <Route path="*" element={<ChatScreen />} />
+              </Route>
+            </Routes>
+          </AppContextProvider>
+        </div>
+      </HashRouter>
+    </ModalProvider>
   );
 }
diff --git a/tools/server/webui/src/components/ModalProvider.tsx b/tools/server/webui/src/components/ModalProvider.tsx
new file mode 100644
index 0000000000..f2ebf8e0a7
--- /dev/null
+++ b/tools/server/webui/src/components/ModalProvider.tsx
@@ -0,0 +1,151 @@
+import React, { createContext, useState, useContext } from 'react';
+
+type ModalContextType = {
+  showConfirm: (message: string) => Promise<boolean>;
+  showPrompt: (
+    message: string,
+    defaultValue?: string
+  ) => Promise<string | undefined>;
+  showAlert: (message: string) => Promise<void>;
+};
+const ModalContext = createContext<ModalContextType>(null!);
+
+interface ModalState<T> {
+  isOpen: boolean;
+  message: string;
+  defaultValue?: string;
+  resolve: ((value: T) => void) | null;
+}
+
+export function ModalProvider({ children }: { children: React.ReactNode }) {
+  const [confirmState, setConfirmState] = useState<ModalState<boolean>>({
+    isOpen: false,
+    message: '',
+    resolve: null,
+  });
+  const [promptState, setPromptState] = useState<
+    ModalState<string | undefined>
+  >({ isOpen: false, message: '', resolve: null });
+  const [alertState, setAlertState] = useState<ModalState<void>>({
+    isOpen: false,
+    message: '',
+    resolve: null,
+  });
+  const inputRef = React.useRef<HTMLInputElement>(null);
+
+  const showConfirm = (message: string): Promise<boolean> => {
+    return new Promise((resolve) => {
+      setConfirmState({ isOpen: true, message, resolve });
+    });
+  };
+
+  const showPrompt = (
+    message: string,
+    defaultValue?: string
+  ): Promise<string | undefined> => {
+    return new Promise((resolve) => {
+      setPromptState({ isOpen: true, message, defaultValue, resolve });
+    });
+  };
+
+  const showAlert = (message: string): Promise<void> => {
+    return new Promise((resolve) => {
+      setAlertState({ isOpen: true, message, resolve });
+    });
+  };
+
+  const handleConfirm = (result: boolean) => {
+    confirmState.resolve?.(result);
+    setConfirmState({ isOpen: false, message: '', resolve: null });
+  };
+
+  const handlePrompt = (result?: string) => {
+    promptState.resolve?.(result);
+    setPromptState({ isOpen: false, message: '', resolve: null });
+  };
+
+  const handleAlertClose = () => {
+    alertState.resolve?.();
+    setAlertState({ isOpen: false, message: '', resolve: null });
+  };
+
+  return (
+    <ModalContext.Provider value={{ showConfirm, showPrompt, showAlert }}>
+      {children}
+
+      {/* Confirm Modal */}
+      {confirmState.isOpen && (
+        <dialog className="modal modal-open">
+          <div className="modal-box">
+            <p>{confirmState.message}</p>
+            <div className="modal-action">
+              <button className="btn" onClick={() => handleConfirm(false)}>
+                Cancel
+              </button>
+              <button className="btn btn-primary" onClick={() => handleConfirm(true)}>
+                Confirm
+              </button>
+            </div>
+          </div>
+        </dialog>
+      )}
+
+      {/* Prompt Modal */}
+      {promptState.isOpen && (
+        <dialog className="modal modal-open">
+          <div className="modal-box">
+            <p>{promptState.message}</p>
+            <input
+              type="text"
+              className="input input-bordered w-full"
+              ref={inputRef}
+              defaultValue={promptState.defaultValue}
+              onKeyDown={(e) => {
+                if (e.key === 'Enter') {
+                  handlePrompt((e.target as HTMLInputElement).value);
+                }
+              }}
+            />
+            <div className="modal-action">
+              <button className="btn" onClick={() => handlePrompt(undefined)}>
+                Cancel
+              </button>
+              <button
+                className="btn btn-primary"
+                onClick={() => handlePrompt(inputRef.current?.value)}
+              >
+                OK
+              </button>
+            </div>
+          </div>
+        </dialog>
+      )}
+
+      {/* Alert Modal */}
+      {alertState.isOpen && (
+        <dialog className="modal modal-open">
+          <div className="modal-box">
+            <p>{alertState.message}</p>
+            <div className="modal-action">
+              <button className="btn" onClick={handleAlertClose}>
+                OK
+              </button>
+            </div>
+          </div>
+        </dialog>
+ ); +} + +export function useModals() { + const context = useContext(ModalContext); + if (!context) throw new Error('useModals must be used within ModalProvider'); + return context; +} diff --git a/tools/server/webui/src/components/SettingDialog.tsx b/tools/server/webui/src/components/SettingDialog.tsx index e4684be7e0..45a8d73b00 100644 --- a/tools/server/webui/src/components/SettingDialog.tsx +++ b/tools/server/webui/src/components/SettingDialog.tsx @@ -13,6 +13,7 @@ import { SquaresPlusIcon, } from '@heroicons/react/24/outline'; import { OpenInNewTab } from '../utils/common'; +import { useModals } from './ModalProvider'; type SettKey = keyof typeof CONFIG_DEFAULT; @@ -282,14 +283,15 @@ export default function SettingDialog({ const [localConfig, setLocalConfig] = useState( JSON.parse(JSON.stringify(config)) ); + const { showConfirm, showAlert } = useModals(); - const resetConfig = () => { - if (window.confirm('Are you sure you want to reset all settings?')) { + const resetConfig = async () => { + if (await showConfirm('Are you sure you want to reset all settings?')) { setLocalConfig(CONFIG_DEFAULT); } }; - const handleSave = () => { + const handleSave = async () => { // copy the local config to prevent direct mutation const newConfig: typeof CONFIG_DEFAULT = JSON.parse( JSON.stringify(localConfig) @@ -302,14 +304,14 @@ export default function SettingDialog({ const mustBeNumeric = isNumeric(CONFIG_DEFAULT[key as SettKey]); if (mustBeString) { if (!isString(value)) { - alert(`Value for ${key} must be string`); + await showAlert(`Value for ${key} must be string`); return; } } else if (mustBeNumeric) { const trimmedValue = value.toString().trim(); const numVal = Number(trimmedValue); if (isNaN(numVal) || !isNumeric(numVal) || trimmedValue.length === 0) { - alert(`Value for ${key} must be numeric`); + await showAlert(`Value for ${key} must be numeric`); return; } // force conversion to number @@ -317,7 +319,7 @@ export default function SettingDialog({ newConfig[key] = numVal; } else if (mustBeBoolean) { if (!isBoolean(value)) { - alert(`Value for ${key} must be boolean`); + await showAlert(`Value for ${key} must be boolean`); return; } } else { diff --git a/tools/server/webui/src/components/Sidebar.tsx b/tools/server/webui/src/components/Sidebar.tsx index 8cac52f4c6..a77cb83b45 100644 --- a/tools/server/webui/src/components/Sidebar.tsx +++ b/tools/server/webui/src/components/Sidebar.tsx @@ -14,6 +14,7 @@ import { import { BtnWithTooltips } from '../utils/common'; import { useAppContext } from '../utils/app.context'; import toast from 'react-hot-toast'; +import { useModals } from './ModalProvider'; export default function Sidebar() { const params = useParams(); @@ -38,6 +39,7 @@ export default function Sidebar() { StorageUtils.offConversationChanged(handleConversationChange); }; }, []); + const { showConfirm, showPrompt } = useModals(); const groupedConv = useMemo( () => groupConversationsByDate(conversations), @@ -130,7 +132,7 @@ export default function Sidebar() { onSelect={() => { navigate(`/chat/${conv.id}`); }} - onDelete={() => { + onDelete={async () => { if (isGenerating(conv.id)) { toast.error( 'Cannot delete conversation while generating' @@ -138,7 +140,7 @@ export default function Sidebar() { return; } if ( - window.confirm( + await showConfirm( 'Are you sure to delete this conversation?' 
) ) { @@ -167,14 +169,14 @@ export default function Sidebar() { document.body.removeChild(a); URL.revokeObjectURL(url); }} - onRename={() => { + onRename={async () => { if (isGenerating(conv.id)) { toast.error( 'Cannot rename conversation while generating' ); return; } - const newName = window.prompt( + const newName = await showPrompt( 'Enter new name for the conversation', conv.name ); diff --git a/tools/tts/tts.cpp b/tools/tts/tts.cpp index a7c536eea3..a71e9bf5b5 100644 --- a/tools/tts/tts.cpp +++ b/tools/tts/tts.cpp @@ -5,7 +5,9 @@ #include "sampling.h" #include "log.h" #include "llama.h" -#include "json.hpp" + +#define JSON_ASSERT GGML_ASSERT +#include #include #include diff --git a/tools/server/httplib.h b/vendor/cpp-httplib/httplib.h similarity index 99% rename from tools/server/httplib.h rename to vendor/cpp-httplib/httplib.h index 0f981dc895..0aa4e62746 100644 --- a/tools/server/httplib.h +++ b/vendor/cpp-httplib/httplib.h @@ -8,7 +8,7 @@ #ifndef CPPHTTPLIB_HTTPLIB_H #define CPPHTTPLIB_HTTPLIB_H -#define CPPHTTPLIB_VERSION "0.20.0" +#define CPPHTTPLIB_VERSION "0.20.1" /* * Configuration @@ -145,6 +145,10 @@ #define CPPHTTPLIB_LISTEN_BACKLOG 5 #endif +#ifndef CPPHTTPLIB_MAX_LINE_LENGTH +#define CPPHTTPLIB_MAX_LINE_LENGTH 32768 +#endif + /* * Headers */ @@ -3067,6 +3071,11 @@ inline bool stream_line_reader::getline() { #endif for (size_t i = 0;; i++) { + if (size() >= CPPHTTPLIB_MAX_LINE_LENGTH) { + // Treat exceptionally long lines as an error to + // prevent infinite loops/memory exhaustion + return false; + } char byte; auto n = strm_.read(&byte, 1); @@ -6055,6 +6064,8 @@ inline void calc_actual_timeout(time_t max_timeout_msec, time_t duration_msec, auto actual_timeout_msec = (std::min)(max_timeout_msec - duration_msec, timeout_msec); + if (actual_timeout_msec < 0) { actual_timeout_msec = 0; } + actual_timeout_sec = actual_timeout_msec / 1000; actual_timeout_usec = (actual_timeout_msec % 1000) * 1000; } @@ -7327,8 +7338,9 @@ Server::process_request(Stream &strm, const std::string &remote_addr, } // Setup `is_connection_closed` method - req.is_connection_closed = [&]() { - return !detail::is_socket_alive(strm.socket()); + auto sock = strm.socket(); + req.is_connection_closed = [sock]() { + return !detail::is_socket_alive(sock); }; // Routing diff --git a/tools/mtmd/miniaudio.h b/vendor/miniaudio/miniaudio.h similarity index 100% rename from tools/mtmd/miniaudio.h rename to vendor/miniaudio/miniaudio.h diff --git a/common/minja/chat-template.hpp b/vendor/minja/chat-template.hpp similarity index 99% rename from common/minja/chat-template.hpp rename to vendor/minja/chat-template.hpp index c930a587a8..ab5b521dd4 100644 --- a/common/minja/chat-template.hpp +++ b/vendor/minja/chat-template.hpp @@ -22,7 +22,7 @@ #include #include -#include +#include using json = nlohmann::ordered_json; diff --git a/common/minja/minja.hpp b/vendor/minja/minja.hpp similarity index 99% rename from common/minja/minja.hpp rename to vendor/minja/minja.hpp index b3b00547d8..f9658ddc01 100644 --- a/common/minja/minja.hpp +++ b/vendor/minja/minja.hpp @@ -29,7 +29,7 @@ #include #include -#include +#include using json = nlohmann::ordered_json; diff --git a/common/json.hpp b/vendor/nlohmann/json.hpp similarity index 94% rename from common/json.hpp rename to vendor/nlohmann/json.hpp index a858728c4c..82d69f7c5d 100644 --- a/common/json.hpp +++ b/vendor/nlohmann/json.hpp @@ -1,9 +1,9 @@ // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 
3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT /****************************************************************************\ @@ -34,10 +34,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -47,10 +47,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -59,20 +59,24 @@ #ifndef JSON_SKIP_LIBRARY_VERSION_CHECK #if defined(NLOHMANN_JSON_VERSION_MAJOR) && defined(NLOHMANN_JSON_VERSION_MINOR) && defined(NLOHMANN_JSON_VERSION_PATCH) - #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 11 || NLOHMANN_JSON_VERSION_PATCH != 3 + #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 12 || NLOHMANN_JSON_VERSION_PATCH != 0 #warning "Already included a different version of the library!" #endif #endif #endif #define NLOHMANN_JSON_VERSION_MAJOR 3 // NOLINT(modernize-macro-to-enum) -#define NLOHMANN_JSON_VERSION_MINOR 11 // NOLINT(modernize-macro-to-enum) -#define NLOHMANN_JSON_VERSION_PATCH 3 // NOLINT(modernize-macro-to-enum) +#define NLOHMANN_JSON_VERSION_MINOR 12 // NOLINT(modernize-macro-to-enum) +#define NLOHMANN_JSON_VERSION_PATCH 0 // NOLINT(modernize-macro-to-enum) #ifndef JSON_DIAGNOSTICS #define JSON_DIAGNOSTICS 0 #endif +#ifndef JSON_DIAGNOSTIC_POSITIONS + #define JSON_DIAGNOSTIC_POSITIONS 0 +#endif + #ifndef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON #define JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON 0 #endif @@ -83,6 +87,12 @@ #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS #endif +#if JSON_DIAGNOSTIC_POSITIONS + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS _dp +#else + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS +#endif + #if JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON _ldvcmp #else @@ -94,14 +104,15 @@ #endif // Construct the namespace ABI tags component -#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) json_abi ## a ## b -#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b) \ - NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) +#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b, c) json_abi ## a ## b ## c +#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b, c) \ + NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b, c) #define NLOHMANN_JSON_ABI_TAGS \ NLOHMANN_JSON_ABI_TAGS_CONCAT( \ NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS, \ - NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON) + NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON, \ + NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS) // Construct the namespace version component #define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) \ @@ -149,10 +160,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// 
SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -162,6 +173,9 @@ #include // forward_list #include // inserter, front_inserter, end #include // map +#ifdef JSON_HAS_CPP_17 + #include // optional +#endif #include // string #include // tuple, make_tuple #include // is_arithmetic, is_same, is_enum, underlying_type, is_convertible @@ -172,10 +186,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -192,10 +206,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -208,10 +222,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -220,10 +234,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -233,10 +247,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -320,11 +334,11 @@ NLOHMANN_JSON_NAMESPACE_END // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann -// SPDX-FileCopyrightText: 2016-2021 Evan Nemerson +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann +// SPDX-FileCopyrightText: 2016 - 2021 Evan Nemerson // SPDX-License-Identifier: MIT /* Hedley - https://nemequ.github.io/hedley @@ -2384,15 +2398,20 @@ JSON_HEDLEY_DIAGNOSTIC_POP // C++ language standard detection // if the user manually specified the used c++ version this is skipped -#if !defined(JSON_HAS_CPP_20) && !defined(JSON_HAS_CPP_17) && !defined(JSON_HAS_CPP_14) && !defined(JSON_HAS_CPP_11) - #if (defined(__cplusplus) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) +#if !defined(JSON_HAS_CPP_23) && !defined(JSON_HAS_CPP_20) && !defined(JSON_HAS_CPP_17) && !defined(JSON_HAS_CPP_14) && !defined(JSON_HAS_CPP_11) + #if (defined(__cplusplus) && __cplusplus > 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG > 202002L) + #define JSON_HAS_CPP_23 #define JSON_HAS_CPP_20 #define 
JSON_HAS_CPP_17 #define JSON_HAS_CPP_14 - #elif (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 + #elif (defined(__cplusplus) && __cplusplus > 201703L) || (defined(_MSVC_LANG) && _MSVC_LANG > 201703L) + #define JSON_HAS_CPP_20 #define JSON_HAS_CPP_17 #define JSON_HAS_CPP_14 - #elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) + #elif (defined(__cplusplus) && __cplusplus > 201402L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 + #define JSON_HAS_CPP_17 + #define JSON_HAS_CPP_14 + #elif (defined(__cplusplus) && __cplusplus > 201103L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) #define JSON_HAS_CPP_14 #endif // the cpp 11 flag is always specified because it is the minimal required version @@ -2568,7 +2587,9 @@ JSON_HEDLEY_DIAGNOSTIC_POP template \ inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \ { \ + /* NOLINTNEXTLINE(modernize-type-traits) we use C++11 */ \ static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ + /* NOLINTNEXTLINE(modernize-avoid-c-arrays) we don't want to depend on */ \ static const std::pair m[] = __VA_ARGS__; \ auto it = std::find_if(std::begin(m), std::end(m), \ [e](const std::pair& ej_pair) -> bool \ @@ -2580,7 +2601,9 @@ JSON_HEDLEY_DIAGNOSTIC_POP template \ inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \ { \ + /* NOLINTNEXTLINE(modernize-type-traits) we use C++11 */ \ static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ + /* NOLINTNEXTLINE(modernize-avoid-c-arrays) we don't want to depend on */ \ static const std::pair m[] = __VA_ARGS__; \ auto it = std::find_if(std::begin(m), std::end(m), \ [&j](const std::pair& ej_pair) -> bool \ @@ -2743,42 +2766,146 @@ JSON_HEDLEY_DIAGNOSTIC_POP #define NLOHMANN_JSON_TO(v1) nlohmann_json_j[#v1] = nlohmann_json_t.v1; #define NLOHMANN_JSON_FROM(v1) nlohmann_json_j.at(#v1).get_to(nlohmann_json_t.v1); -#define NLOHMANN_JSON_FROM_WITH_DEFAULT(v1) nlohmann_json_t.v1 = nlohmann_json_j.value(#v1, nlohmann_json_default_obj.v1); +#define NLOHMANN_JSON_FROM_WITH_DEFAULT(v1) nlohmann_json_t.v1 = !nlohmann_json_j.is_null() ? nlohmann_json_j.value(#v1, nlohmann_json_default_obj.v1) : nlohmann_json_default_obj.v1; /*! @brief macro @def NLOHMANN_DEFINE_TYPE_INTRUSIVE @since version 3.9.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_intrusive/ */ #define NLOHMANN_DEFINE_TYPE_INTRUSIVE(Type, ...) \ - friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } + template::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + friend void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT +@since version 3.11.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_intrusive/ +*/ #define NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT(Type, ...) 
\ - friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + template::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + friend void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_INTRUSIVE_ONLY_SERIALIZE +@since version 3.11.3 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_intrusive/ +*/ #define NLOHMANN_DEFINE_TYPE_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \ - friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } + template::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } /*! @brief macro @def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE @since version 3.9.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_non_intrusive/ */ #define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Type, ...) \ - inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } - -#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \ - inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT +@since version 3.11.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_non_intrusive/ +*/ #define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT(Type, ...) 
\ - inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE +@since version 3.11.3 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_non_intrusive/ +*/ +#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \ + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE +@since version 3.12.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/ +*/ +#define NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE(Type, BaseType, ...) \ + template::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + friend void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { nlohmann::from_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE_WITH_DEFAULT +@since version 3.12.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/ +*/ +#define NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE_WITH_DEFAULT(Type, BaseType, ...) \ + template::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + friend void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { nlohmann::from_json(nlohmann_json_j, static_cast(nlohmann_json_t)); const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE_ONLY_SERIALIZE +@since version 3.12.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/ +*/ +#define NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE_ONLY_SERIALIZE(Type, BaseType, ...) \ + template::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } + +/*! 
+@brief macro +@def NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE +@since version 3.12.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/ +*/ +#define NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE(Type, BaseType, ...) \ + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { nlohmann::from_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE_WITH_DEFAULT +@since version 3.12.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/ +*/ +#define NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE_WITH_DEFAULT(Type, BaseType, ...) \ + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { nlohmann::from_json(nlohmann_json_j, static_cast(nlohmann_json_t)); const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE +@since version 3.12.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/ +*/ +#define NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE(Type, BaseType, ...) \ + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } // inspired from https://stackoverflow.com/a/26745591 -// allows to call any std function as if (e.g. 
with begin): +// allows calling any std function as if (e.g., with begin): // using std::begin; begin(x); // // it allows using the detected idiom to retrieve the return type @@ -2939,10 +3066,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3014,10 +3141,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3056,10 +3183,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-FileCopyrightText: 2018 The Abseil Authors // SPDX-License-Identifier: MIT @@ -3219,7 +3346,7 @@ struct static_const #endif template -inline constexpr std::array make_array(Args&& ... args) +constexpr std::array make_array(Args&& ... args) { return std::array {{static_cast(std::forward(args))...}}; } @@ -3230,27 +3357,27 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT #include // numeric_limits +#include // char_traits +#include // tuple #include // false_type, is_constructible, is_integral, is_same, true_type #include // declval -#include // tuple -#include // char_traits // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3293,7 +3420,7 @@ struct iterator_traits template struct iterator_traits < T, enable_if_t < !std::is_pointer::value >> - : iterator_types + : iterator_types { }; @@ -3315,10 +3442,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3335,10 +3462,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// 
SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3359,10 +3486,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT #ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_ @@ -3624,7 +3751,7 @@ struct char_traits : std::char_traits static constexpr int_type eof() noexcept { - return static_cast(EOF); + return static_cast(std::char_traits::eof()); } }; @@ -3648,7 +3775,7 @@ struct char_traits : std::char_traits static constexpr int_type eof() noexcept { - return static_cast(EOF); + return static_cast(std::char_traits::eof()); } }; @@ -3674,19 +3801,19 @@ struct is_default_constructible : std::is_default_constructible {}; template struct is_default_constructible> - : conjunction, is_default_constructible> {}; + : conjunction, is_default_constructible> {}; template struct is_default_constructible> - : conjunction, is_default_constructible> {}; + : conjunction, is_default_constructible> {}; template struct is_default_constructible> - : conjunction...> {}; + : conjunction...> {}; template struct is_default_constructible> - : conjunction...> {}; + : conjunction...> {}; template struct is_constructible : std::is_constructible {}; @@ -3884,8 +4011,8 @@ is_detected::value&& // special case for types like std::filesystem::path whose iterator's value_type are themselves // c.f. https://github.com/nlohmann/json/pull/3073 !std::is_same>::value&& - is_complete_type < - detected_t>::value >> +is_complete_type < +detected_t>::value >> { using value_type = range_value_t; @@ -4008,12 +4135,12 @@ using is_usable_as_key_type = typename std::conditional < template> using is_usable_as_basic_json_key_type = typename std::conditional < - is_usable_as_key_type::value - && !is_json_iterator_of::value, - std::true_type, - std::false_type >::type; + is_usable_as_key_type::value + && !is_json_iterator_of::value, + std::true_type, + std::false_type >::type; template using detect_erase_with_key_type = decltype(std::declval().erase(std::declval())); @@ -4147,7 +4274,7 @@ struct value_in_range_of_impl1 }; template -inline constexpr bool value_in_range_of(T val) +constexpr bool value_in_range_of(T val) { return value_in_range_of_impl1::test(val); } @@ -4163,7 +4290,7 @@ namespace impl { template -inline constexpr bool is_c_string() +constexpr bool is_c_string() { using TUnExt = typename std::remove_extent::type; using TUnCVExt = typename std::remove_cv::type; @@ -4191,7 +4318,7 @@ namespace impl { template -inline constexpr bool is_transparent() +constexpr bool is_transparent() { return is_detected::value; } @@ -4210,10 +4337,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -4358,6 +4485,18 @@ inline OutStringType concat(Args && ... 
args) NLOHMANN_JSON_NAMESPACE_END +// With -Wweak-vtables, Clang will complain about the exception classes as they +// have no out-of-line virtual method definitions and their vtable will be +// emitted in every translation unit. This issue cannot be fixed with a +// header-only library as there is no implementation file to move these +// functions to. As a result, we suppress this warning here to avoid client +// code to stumble over this. See https://github.com/nlohmann/json/issues/4087 +// for a discussion. +#if defined(__clang__) + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wweak-vtables" +#endif + NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { @@ -4452,16 +4591,34 @@ class exception : public std::exception { return concat(a, '/', detail::escape(b)); }); - return concat('(', str, ") "); + + return concat('(', str, ") ", get_byte_positions(leaf_element)); #else - static_cast(leaf_element); - return ""; + return get_byte_positions(leaf_element); #endif } private: /// an exception object as storage for error messages std::runtime_error m; +#if JSON_DIAGNOSTIC_POSITIONS + template + static std::string get_byte_positions(const BasicJsonType* leaf_element) + { + if ((leaf_element->start_pos() != std::string::npos) && (leaf_element->end_pos() != std::string::npos)) + { + return concat("(bytes ", std::to_string(leaf_element->start_pos()), "-", std::to_string(leaf_element->end_pos()), ") "); + } + return ""; + } +#else + template + static std::string get_byte_positions(const BasicJsonType* leaf_element) + { + static_cast(leaf_element); + return ""; + } +#endif }; /// @brief exception indicating a parse error @@ -4589,6 +4746,10 @@ class other_error : public exception } // namespace detail NLOHMANN_JSON_NAMESPACE_END +#if defined(__clang__) + #pragma clang diagnostic pop +#endif + // #include // #include @@ -4596,10 +4757,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -4620,10 +4781,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -4640,7 +4801,7 @@ namespace std_fs = std::experimental::filesystem; } // namespace detail NLOHMANN_JSON_NAMESPACE_END #elif JSON_HAS_FILESYSTEM -#include +#include // NOLINT(build/c++17) NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { @@ -4670,6 +4831,24 @@ inline void from_json(const BasicJsonType& j, typename std::nullptr_t& n) n = nullptr; } +#ifdef JSON_HAS_CPP_17 +#ifndef JSON_USE_IMPLICIT_CONVERSIONS +template +void from_json(const BasicJsonType& j, std::optional& opt) +{ + if (j.is_null()) + { + opt = std::nullopt; + } + else + { + opt.emplace(j.template get()); + } +} + +#endif // JSON_USE_IMPLICIT_CONVERSIONS +#endif // JSON_HAS_CPP_17 + // overloads for basic_json template parameters template < typename BasicJsonType, typename ArithmeticType, enable_if_t < std::is_arithmetic::value&& @@ -4817,6 +4996,54 @@ auto from_json(const BasicJsonType& j, T 
(&arr)[N]) // NOLINT(cppcoreguidelines } } +template +auto from_json(const BasicJsonType& j, T (&arr)[N1][N2]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) +-> decltype(j.template get(), void()) +{ + for (std::size_t i1 = 0; i1 < N1; ++i1) + { + for (std::size_t i2 = 0; i2 < N2; ++i2) + { + arr[i1][i2] = j.at(i1).at(i2).template get(); + } + } +} + +template +auto from_json(const BasicJsonType& j, T (&arr)[N1][N2][N3]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) +-> decltype(j.template get(), void()) +{ + for (std::size_t i1 = 0; i1 < N1; ++i1) + { + for (std::size_t i2 = 0; i2 < N2; ++i2) + { + for (std::size_t i3 = 0; i3 < N3; ++i3) + { + arr[i1][i2][i3] = j.at(i1).at(i2).at(i3).template get(); + } + } + } +} + +template +auto from_json(const BasicJsonType& j, T (&arr)[N1][N2][N3][N4]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) +-> decltype(j.template get(), void()) +{ + for (std::size_t i1 = 0; i1 < N1; ++i1) + { + for (std::size_t i2 = 0; i2 < N2; ++i2) + { + for (std::size_t i3 = 0; i3 < N3; ++i3) + { + for (std::size_t i4 = 0; i4 < N4; ++i4) + { + arr[i1][i2][i3][i4] = j.at(i1).at(i2).at(i3).at(i4).template get(); + } + } + } + } +} + template inline void from_json_array_impl(const BasicJsonType& j, typename BasicJsonType::array_t& arr, priority_tag<3> /*unused*/) { @@ -4902,7 +5129,7 @@ void()) template < typename BasicJsonType, typename T, std::size_t... Idx > std::array from_json_inplace_array_impl(BasicJsonType&& j, - identity_tag> /*unused*/, index_sequence /*unused*/) + identity_tag> /*unused*/, index_sequence /*unused*/) { return { { std::forward(j).at(Idx).template get()... } }; } @@ -5006,6 +5233,12 @@ std::tuple from_json_tuple_impl_base(BasicJsonType&& j, index_sequence< return std::make_tuple(std::forward(j).at(Idx).template get()...); } +template +std::tuple<> from_json_tuple_impl_base(BasicJsonType& /*unused*/, index_sequence<> /*unused*/) +{ + return {}; +} + template < typename BasicJsonType, class A1, class A2 > std::pair from_json_tuple_impl(BasicJsonType&& j, identity_tag> /*unused*/, priority_tag<0> /*unused*/) { @@ -5091,7 +5324,12 @@ inline void from_json(const BasicJsonType& j, std_fs::path& p) { JSON_THROW(type_error::create(302, concat("type must be string, but is ", j.type_name()), &j)); } - p = *j.template get_ptr(); + const auto& s = *j.template get_ptr(); +#ifdef JSON_HAS_CPP_20 + p = std_fs::path(std::u8string_view(reinterpret_cast(s.data()), s.size())); +#else + p = std_fs::u8path(s); // accepts UTF-8 encoded std::string in C++17, deprecated in C++20 +#endif } #endif @@ -5126,14 +5364,20 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT +// #include +// JSON_HAS_CPP_17 +#ifdef JSON_HAS_CPP_17 + #include // optional +#endif + #include // copy #include // begin, end #include // string @@ -5146,17 +5390,16 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 
Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT #include // size_t -#include // input_iterator_tag -#include // string, to_string +#include // forward_iterator_tag #include // tuple_size, get, tuple_element #include // move @@ -5168,6 +5411,46 @@ NLOHMANN_JSON_NAMESPACE_END // #include +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.12.0 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +#include // size_t +#include // string, to_string + +// #include + + +NLOHMANN_JSON_NAMESPACE_BEGIN +namespace detail +{ + +template +void int_to_string(StringType& target, std::size_t value) +{ + // For ADL + using std::to_string; + target = to_string(value); +} + +template +StringType to_string(std::size_t value) +{ + StringType result; + int_to_string(result, value); + return result; +} + +} // namespace detail +NLOHMANN_JSON_NAMESPACE_END + // #include @@ -5175,13 +5458,6 @@ NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { -template -void int_to_string( string_type& target, std::size_t value ) -{ - // For ADL - using std::to_string; - target = to_string(value); -} template class iteration_proxy_value { public: @@ -5189,7 +5465,7 @@ template class iteration_proxy_value using value_type = iteration_proxy_value; using pointer = value_type *; using reference = value_type &; - using iterator_category = std::input_iterator_tag; + using iterator_category = std::forward_iterator_tag; using string_type = typename std::remove_cv< typename std::remove_reference().key() ) >::type >::type; private: @@ -5369,7 +5645,7 @@ namespace std #endif template class tuple_size<::nlohmann::detail::iteration_proxy_value> // NOLINT(cert-dcl58-cpp) - : public std::integral_constant {}; + : public std::integral_constant {}; template class tuple_element> // NOLINT(cert-dcl58-cpp) @@ -5390,8 +5666,6 @@ class tuple_element> inline constexpr bool ::std::ranges::enable_borrowed_range<::nlohmann::detail::iteration_proxy> = true; #endif -// #include - // #include // #include @@ -5637,6 +5911,22 @@ struct external_constructor // to_json // ///////////// +#ifdef JSON_HAS_CPP_17 +template::value, int> = 0> +void to_json(BasicJsonType& j, const std::optional& opt) +{ + if (opt.has_value()) + { + j = *opt; + } + else + { + j = nullptr; + } +} +#endif + template::value, int> = 0> inline void to_json(BasicJsonType& j, T b) noexcept @@ -5783,6 +6073,13 @@ inline void to_json_tuple_impl(BasicJsonType& j, const Tuple& t, index_sequence< j = { std::get(t)... 
}; } +template +inline void to_json_tuple_impl(BasicJsonType& j, const Tuple& /*unused*/, index_sequence<> /*unused*/) +{ + using array_t = typename BasicJsonType::array_t; + j = array_t(); +} + template::value, int > = 0> inline void to_json(BasicJsonType& j, const T& t) { @@ -5793,7 +6090,12 @@ inline void to_json(BasicJsonType& j, const T& t) template inline void to_json(BasicJsonType& j, const std_fs::path& p) { - j = p.string(); +#ifdef JSON_HAS_CPP_20 + const std::u8string s = p.u8string(); + j = std::string(s.begin(), s.end()); +#else + j = p.u8string(); // returns std::string in C++17 +#endif } #endif @@ -5868,10 +6170,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -5980,10 +6282,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -6113,10 +6415,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -6133,16 +6435,19 @@ NLOHMANN_JSON_NAMESPACE_END #include // char_traits, string #include // make_pair, move #include // vector +#ifdef __cpp_lib_byteswap + #include //byteswap +#endif // #include // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -6162,6 +6467,8 @@ NLOHMANN_JSON_NAMESPACE_END #include // istream #endif // JSON_NO_IO +// #include + // #include // #include @@ -6209,6 +6516,13 @@ class file_input_adapter return std::fgetc(m_file); } + // returns the number of characters successfully read + template + std::size_t get_elements(T* dest, std::size_t count = 1) + { + return fread(dest, 1, sizeof(T) * count, m_file); + } + private: /// the file pointer to read from std::FILE* m_file; @@ -6268,6 +6582,17 @@ class input_stream_adapter return res; } + template + std::size_t get_elements(T* dest, std::size_t count = 1) + { + auto res = static_cast(sb->sgetn(reinterpret_cast(dest), static_cast(count * sizeof(T)))); + if (JSON_HEDLEY_UNLIKELY(res < count * sizeof(T))) + { + is->clear(is->rdstate() | std::ios::eofbit); + } + return res; + } + private: /// the associated input stream std::istream* is = nullptr; @@ -6299,6 +6624,26 @@ class iterator_input_adapter return char_traits::eof(); } + // for general iterators, we cannot really do something better than falling back to processing the range one-by-one + template + std::size_t get_elements(T* dest, std::size_t count = 1) + { 
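With JSON_HAS_CPP_17, the added to_json overload serializes an engaged std::optional as its contained value and an empty one as null; std_fs::path is now serialized through its UTF-8 u8string() form rather than the native encoding. A short sketch of the optional mapping (same include-path assumption as above, C++17 or later):

    #include <nlohmann/json.hpp>
    #include <iostream>
    #include <optional>

    int main() {
        nlohmann::json j;
        j["present"] = std::optional<int>(42);   // serialized as 42
        j["absent"]  = std::optional<int>();     // serialized as null
        std::cout << j.dump() << '\n';           // {"absent":null,"present":42}
        return 0;
    }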
+ auto* ptr = reinterpret_cast(dest); + for (std::size_t read_index = 0; read_index < count * sizeof(T); ++read_index) + { + if (JSON_HEDLEY_LIKELY(current != end)) + { + ptr[read_index] = static_cast(*current); + std::advance(current, 1); + } + else + { + return read_index; + } + } + return count * sizeof(T); + } + private: IteratorType current; IteratorType end; @@ -6462,6 +6807,13 @@ class wide_string_input_adapter return utf8_bytes[utf8_bytes_index++]; } + // parsing binary with wchar doesn't make sense, but since the parsing mode can be runtime, we need something here + template + std::size_t get_elements(T* /*dest*/, std::size_t /*count*/ = 1) + { + JSON_THROW(parse_error::create(112, 1, "wide string type cannot be interpreted as binary data", nullptr)); + } + private: BaseInputAdapter base_adapter; @@ -6558,10 +6910,17 @@ typename container_input_adapter_factory_impl::container_input_adapter_factory::create(container); } +// specialization for std::string +using string_input_adapter_type = decltype(input_adapter(std::declval())); + #ifndef JSON_NO_IO // Special cases with fast paths inline file_input_adapter input_adapter(std::FILE* file) { + if (file == nullptr) + { + JSON_THROW(parse_error::create(101, 0, "attempting to parse an empty input; check that your input string or stream contains the expected JSON", nullptr)); + } return file_input_adapter(file); } @@ -6588,9 +6947,13 @@ template < typename CharT, int >::type = 0 > contiguous_bytes_input_adapter input_adapter(CharT b) { + if (b == nullptr) + { + JSON_THROW(parse_error::create(101, 0, "attempting to parse an empty input; check that your input string or stream contains the expected JSON", nullptr)); + } auto length = std::strlen(reinterpret_cast(b)); const auto* ptr = reinterpret_cast(b); - return input_adapter(ptr, ptr + length); + return input_adapter(ptr, ptr + length); // cppcheck-suppress[nullPointerArithmeticRedundantCheck] } template @@ -6636,742 +6999,29 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT #include #include // string +#include // enable_if_t #include // move #include // vector // #include -// #include - -// #include - - -NLOHMANN_JSON_NAMESPACE_BEGIN - -/*! -@brief SAX interface - -This class describes the SAX interface used by @ref nlohmann::json::sax_parse. -Each function is called in different situations while the input is parsed. The -boolean return value informs the parser whether to continue processing the -input. -*/ -template -struct json_sax -{ - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - - /*! - @brief a null value was read - @return whether parsing should proceed - */ - virtual bool null() = 0; - - /*! - @brief a boolean value was read - @param[in] val boolean value - @return whether parsing should proceed - */ - virtual bool boolean(bool val) = 0; - - /*! 
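The two guards added to input_adapter above turn a null std::FILE* or null C-string into parse_error.101 up front, instead of undefined behaviour in fgetc/strlen. Sketch:

    #include <nlohmann/json.hpp>
    #include <iostream>

    int main() {
        const char* input = nullptr;
        try {
            auto j = nlohmann::json::parse(input);  // 3.12.0: throws, was UB
        } catch (const nlohmann::json::parse_error& e) {
            std::cout << e.what() << '\n';  // [json.exception.parse_error.101] ...
        }
        return 0;
    }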
- @brief an integer number was read - @param[in] val integer value - @return whether parsing should proceed - */ - virtual bool number_integer(number_integer_t val) = 0; - - /*! - @brief an unsigned integer number was read - @param[in] val unsigned integer value - @return whether parsing should proceed - */ - virtual bool number_unsigned(number_unsigned_t val) = 0; - - /*! - @brief a floating-point number was read - @param[in] val floating-point value - @param[in] s raw token value - @return whether parsing should proceed - */ - virtual bool number_float(number_float_t val, const string_t& s) = 0; - - /*! - @brief a string value was read - @param[in] val string value - @return whether parsing should proceed - @note It is safe to move the passed string value. - */ - virtual bool string(string_t& val) = 0; - - /*! - @brief a binary value was read - @param[in] val binary value - @return whether parsing should proceed - @note It is safe to move the passed binary value. - */ - virtual bool binary(binary_t& val) = 0; - - /*! - @brief the beginning of an object was read - @param[in] elements number of object elements or -1 if unknown - @return whether parsing should proceed - @note binary formats may report the number of elements - */ - virtual bool start_object(std::size_t elements) = 0; - - /*! - @brief an object key was read - @param[in] val object key - @return whether parsing should proceed - @note It is safe to move the passed string. - */ - virtual bool key(string_t& val) = 0; - - /*! - @brief the end of an object was read - @return whether parsing should proceed - */ - virtual bool end_object() = 0; - - /*! - @brief the beginning of an array was read - @param[in] elements number of array elements or -1 if unknown - @return whether parsing should proceed - @note binary formats may report the number of elements - */ - virtual bool start_array(std::size_t elements) = 0; - - /*! - @brief the end of an array was read - @return whether parsing should proceed - */ - virtual bool end_array() = 0; - - /*! - @brief a parse error occurred - @param[in] position the position in the input where the error occurs - @param[in] last_token the last read token - @param[in] ex an exception object describing the error - @return whether parsing should proceed (must return false) - */ - virtual bool parse_error(std::size_t position, - const std::string& last_token, - const detail::exception& ex) = 0; - - json_sax() = default; - json_sax(const json_sax&) = default; - json_sax(json_sax&&) noexcept = default; - json_sax& operator=(const json_sax&) = default; - json_sax& operator=(json_sax&&) noexcept = default; - virtual ~json_sax() = default; -}; - -namespace detail -{ -/*! -@brief SAX implementation to create a JSON value from SAX events - -This class implements the @ref json_sax interface and processes the SAX events -to create a JSON value which makes it basically a DOM parser. The structure or -hierarchy of the JSON value is managed by the stack `ref_stack` which contains -a pointer to the respective array or object for each recursion depth. - -After successful parsing, the value that is passed by reference to the -constructor contains the parsed value. 
- -@tparam BasicJsonType the JSON type -*/ -template -class json_sax_dom_parser -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - - /*! - @param[in,out] r reference to a JSON value that is manipulated while - parsing - @param[in] allow_exceptions_ whether parse errors yield exceptions - */ - explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true) - : root(r), allow_exceptions(allow_exceptions_) - {} - - // make class move-only - json_sax_dom_parser(const json_sax_dom_parser&) = delete; - json_sax_dom_parser(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) - json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; - json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) - ~json_sax_dom_parser() = default; - - bool null() - { - handle_value(nullptr); - return true; - } - - bool boolean(bool val) - { - handle_value(val); - return true; - } - - bool number_integer(number_integer_t val) - { - handle_value(val); - return true; - } - - bool number_unsigned(number_unsigned_t val) - { - handle_value(val); - return true; - } - - bool number_float(number_float_t val, const string_t& /*unused*/) - { - handle_value(val); - return true; - } - - bool string(string_t& val) - { - handle_value(val); - return true; - } - - bool binary(binary_t& val) - { - handle_value(std::move(val)); - return true; - } - - bool start_object(std::size_t len) - { - ref_stack.push_back(handle_value(BasicJsonType::value_t::object)); - - if (JSON_HEDLEY_UNLIKELY(len != static_cast(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back())); - } - - return true; - } - - bool key(string_t& val) - { - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(ref_stack.back()->is_object()); - - // add null at given key and store the reference for later - object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val)); - return true; - } - - bool end_object() - { - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(ref_stack.back()->is_object()); - - ref_stack.back()->set_parents(); - ref_stack.pop_back(); - return true; - } - - bool start_array(std::size_t len) - { - ref_stack.push_back(handle_value(BasicJsonType::value_t::array)); - - if (JSON_HEDLEY_UNLIKELY(len != static_cast(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back())); - } - - return true; - } - - bool end_array() - { - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(ref_stack.back()->is_array()); - - ref_stack.back()->set_parents(); - ref_stack.pop_back(); - return true; - } - - template - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, - const Exception& ex) - { - errored = true; - static_cast(ex); - if (allow_exceptions) - { - JSON_THROW(ex); - } - return false; - } - - constexpr bool is_errored() const - { - return errored; - } - - private: - /*! - @invariant If the ref stack is empty, then the passed value will be the new - root. 
- @invariant If the ref stack contains a value, then it is an array or an - object to which we can add elements - */ - template - JSON_HEDLEY_RETURNS_NON_NULL - BasicJsonType* handle_value(Value&& v) - { - if (ref_stack.empty()) - { - root = BasicJsonType(std::forward(v)); - return &root; - } - - JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object()); - - if (ref_stack.back()->is_array()) - { - ref_stack.back()->m_data.m_value.array->emplace_back(std::forward(v)); - return &(ref_stack.back()->m_data.m_value.array->back()); - } - - JSON_ASSERT(ref_stack.back()->is_object()); - JSON_ASSERT(object_element); - *object_element = BasicJsonType(std::forward(v)); - return object_element; - } - - /// the parsed JSON value - BasicJsonType& root; - /// stack to model hierarchy of values - std::vector ref_stack {}; - /// helper to hold the reference for the next object element - BasicJsonType* object_element = nullptr; - /// whether a syntax error occurred - bool errored = false; - /// whether to throw exceptions in case of errors - const bool allow_exceptions = true; -}; - -template -class json_sax_dom_callback_parser -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - using parser_callback_t = typename BasicJsonType::parser_callback_t; - using parse_event_t = typename BasicJsonType::parse_event_t; - - json_sax_dom_callback_parser(BasicJsonType& r, - const parser_callback_t cb, - const bool allow_exceptions_ = true) - : root(r), callback(cb), allow_exceptions(allow_exceptions_) - { - keep_stack.push_back(true); - } - - // make class move-only - json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) - json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) - ~json_sax_dom_callback_parser() = default; - - bool null() - { - handle_value(nullptr); - return true; - } - - bool boolean(bool val) - { - handle_value(val); - return true; - } - - bool number_integer(number_integer_t val) - { - handle_value(val); - return true; - } - - bool number_unsigned(number_unsigned_t val) - { - handle_value(val); - return true; - } - - bool number_float(number_float_t val, const string_t& /*unused*/) - { - handle_value(val); - return true; - } - - bool string(string_t& val) - { - handle_value(val); - return true; - } - - bool binary(binary_t& val) - { - handle_value(std::move(val)); - return true; - } - - bool start_object(std::size_t len) - { - // check callback for object start - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::object_start, discarded); - keep_stack.push_back(keep); - - auto val = handle_value(BasicJsonType::value_t::object, true); - ref_stack.push_back(val.second); - - // check object limit - if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != static_cast(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back())); - } - - return true; - } - - bool 
key(string_t& val) - { - BasicJsonType k = BasicJsonType(val); - - // check callback for key - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::key, k); - key_keep_stack.push_back(keep); - - // add discarded value at given key and store the reference for later - if (keep && ref_stack.back()) - { - object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val) = discarded); - } - - return true; - } - - bool end_object() - { - if (ref_stack.back()) - { - if (!callback(static_cast(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back())) - { - // discard object - *ref_stack.back() = discarded; - } - else - { - ref_stack.back()->set_parents(); - } - } - - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(!keep_stack.empty()); - ref_stack.pop_back(); - keep_stack.pop_back(); - - if (!ref_stack.empty() && ref_stack.back() && ref_stack.back()->is_structured()) - { - // remove discarded value - for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it) - { - if (it->is_discarded()) - { - ref_stack.back()->erase(it); - break; - } - } - } - - return true; - } - - bool start_array(std::size_t len) - { - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::array_start, discarded); - keep_stack.push_back(keep); - - auto val = handle_value(BasicJsonType::value_t::array, true); - ref_stack.push_back(val.second); - - // check array limit - if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != static_cast(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back())); - } - - return true; - } - - bool end_array() - { - bool keep = true; - - if (ref_stack.back()) - { - keep = callback(static_cast(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back()); - if (keep) - { - ref_stack.back()->set_parents(); - } - else - { - // discard array - *ref_stack.back() = discarded; - } - } - - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(!keep_stack.empty()); - ref_stack.pop_back(); - keep_stack.pop_back(); - - // remove discarded value - if (!keep && !ref_stack.empty() && ref_stack.back()->is_array()) - { - ref_stack.back()->m_data.m_value.array->pop_back(); - } - - return true; - } - - template - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, - const Exception& ex) - { - errored = true; - static_cast(ex); - if (allow_exceptions) - { - JSON_THROW(ex); - } - return false; - } - - constexpr bool is_errored() const - { - return errored; - } - - private: - /*! - @param[in] v value to add to the JSON value we build during parsing - @param[in] skip_callback whether we should skip calling the callback - function; this is required after start_array() and - start_object() SAX events, because otherwise we would call the - callback function with an empty array or object, respectively. - - @invariant If the ref stack is empty, then the passed value will be the new - root. 
- @invariant If the ref stack contains a value, then it is an array or an - object to which we can add elements - - @return pair of boolean (whether value should be kept) and pointer (to the - passed value in the ref_stack hierarchy; nullptr if not kept) - */ - template - std::pair handle_value(Value&& v, const bool skip_callback = false) - { - JSON_ASSERT(!keep_stack.empty()); - - // do not handle this value if we know it would be added to a discarded - // container - if (!keep_stack.back()) - { - return {false, nullptr}; - } - - // create value - auto value = BasicJsonType(std::forward(v)); - - // check callback - const bool keep = skip_callback || callback(static_cast(ref_stack.size()), parse_event_t::value, value); - - // do not handle this value if we just learnt it shall be discarded - if (!keep) - { - return {false, nullptr}; - } - - if (ref_stack.empty()) - { - root = std::move(value); - return {true, & root}; - } - - // skip this value if we already decided to skip the parent - // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360) - if (!ref_stack.back()) - { - return {false, nullptr}; - } - - // we now only expect arrays and objects - JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object()); - - // array - if (ref_stack.back()->is_array()) - { - ref_stack.back()->m_data.m_value.array->emplace_back(std::move(value)); - return {true, & (ref_stack.back()->m_data.m_value.array->back())}; - } - - // object - JSON_ASSERT(ref_stack.back()->is_object()); - // check if we should store an element for the current key - JSON_ASSERT(!key_keep_stack.empty()); - const bool store_element = key_keep_stack.back(); - key_keep_stack.pop_back(); - - if (!store_element) - { - return {false, nullptr}; - } - - JSON_ASSERT(object_element); - *object_element = std::move(value); - return {true, object_element}; - } - - /// the parsed JSON value - BasicJsonType& root; - /// stack to model hierarchy of values - std::vector ref_stack {}; - /// stack to manage which values to keep - std::vector keep_stack {}; // NOLINT(readability-redundant-member-init) - /// stack to manage which object keys to keep - std::vector key_keep_stack {}; // NOLINT(readability-redundant-member-init) - /// helper to hold the reference for the next object element - BasicJsonType* object_element = nullptr; - /// whether a syntax error occurred - bool errored = false; - /// callback function - const parser_callback_t callback = nullptr; - /// whether to throw exceptions in case of errors - const bool allow_exceptions = true; - /// a discarded value for the callback - BasicJsonType discarded = BasicJsonType::value_t::discarded; -}; - -template -class json_sax_acceptor -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - - bool null() - { - return true; - } - - bool boolean(bool /*unused*/) - { - return true; - } - - bool number_integer(number_integer_t /*unused*/) - { - return true; - } - - bool number_unsigned(number_unsigned_t /*unused*/) - { - return true; - } - - bool number_float(number_float_t /*unused*/, const string_t& /*unused*/) - { - return true; - } - - bool string(string_t& /*unused*/) - { - return true; - } - - bool binary(binary_t& /*unused*/) - { - return true; - } - - bool start_object(std::size_t /*unused*/ = 
static_cast(-1)) - { - return true; - } - - bool key(string_t& /*unused*/) - { - return true; - } - - bool end_object() - { - return true; - } - - bool start_array(std::size_t /*unused*/ = static_cast(-1)) - { - return true; - } - - bool end_array() - { - return true; - } - - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/) - { - return false; - } -}; - -} // namespace detail -NLOHMANN_JSON_NAMESPACE_END - // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -8339,7 +7989,7 @@ class lexer : public lexer_base locale's decimal point is used instead of `.` to work with the locale-dependent converters. */ - token_type scan_number() // lgtm [cpp/use-of-goto] + token_type scan_number() // lgtm [cpp/use-of-goto] `goto` is used in this function to implement the number-parsing state machine described above. By design, any finite input will eventually reach the "done" state or return token_type::parse_error. In each intermediate state, 1 byte of the input is appended to the token_buffer vector, and only the already initialized variables token_buffer, number_type, and error_message are manipulated. { // reset token_buffer to store the number's bytes reset(); @@ -8421,6 +8071,7 @@ scan_number_zero: case '.': { add(decimal_point_char); + decimal_point_position = token_buffer.size() - 1; goto scan_number_decimal1; } @@ -8457,6 +8108,7 @@ scan_number_any1: case '.': { add(decimal_point_char); + decimal_point_position = token_buffer.size() - 1; goto scan_number_decimal1; } @@ -8617,7 +8269,7 @@ scan_number_done: // we are done scanning a number) unget(); - char* endptr = nullptr; // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg) + char* endptr = nullptr; // NOLINT(misc-const-correctness,cppcoreguidelines-pro-type-vararg,hicpp-vararg) errno = 0; // try to parse integers first and fall back to floats @@ -8628,7 +8280,7 @@ scan_number_done: // we checked the number format before JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size()); - if (errno == 0) + if (errno != ERANGE) { value_unsigned = static_cast(x); if (value_unsigned == x) @@ -8644,7 +8296,7 @@ scan_number_done: // we checked the number format before JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size()); - if (errno == 0) + if (errno != ERANGE) { value_integer = static_cast(x); if (value_integer == x) @@ -8694,6 +8346,7 @@ scan_number_done: { token_buffer.clear(); token_string.clear(); + decimal_point_position = std::string::npos; token_string.push_back(char_traits::to_char_type(current)); } @@ -8802,6 +8455,11 @@ scan_number_done: /// return current string value (implicitly resets the token; useful only once) string_t& get_string() { + // translate decimal points from locale back to '.' (#4084) + if (decimal_point_char != '.' 
&& decimal_point_position != std::string::npos) + { + token_buffer[decimal_point_position] = '.'; + } return token_buffer; } @@ -8999,6 +8657,8 @@ scan_number_done: /// the decimal point const char_int_type decimal_point_char = '.'; + /// the position of the decimal point in the input + std::size_t decimal_point_position = std::string::npos; }; } // namespace detail @@ -9006,13 +8666,986 @@ NLOHMANN_JSON_NAMESPACE_END // #include +// #include + +NLOHMANN_JSON_NAMESPACE_BEGIN + +/*! +@brief SAX interface + +This class describes the SAX interface used by @ref nlohmann::json::sax_parse. +Each function is called in different situations while the input is parsed. The +boolean return value informs the parser whether to continue processing the +input. +*/ +template +struct json_sax +{ + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + + /*! + @brief a null value was read + @return whether parsing should proceed + */ + virtual bool null() = 0; + + /*! + @brief a boolean value was read + @param[in] val boolean value + @return whether parsing should proceed + */ + virtual bool boolean(bool val) = 0; + + /*! + @brief an integer number was read + @param[in] val integer value + @return whether parsing should proceed + */ + virtual bool number_integer(number_integer_t val) = 0; + + /*! + @brief an unsigned integer number was read + @param[in] val unsigned integer value + @return whether parsing should proceed + */ + virtual bool number_unsigned(number_unsigned_t val) = 0; + + /*! + @brief a floating-point number was read + @param[in] val floating-point value + @param[in] s raw token value + @return whether parsing should proceed + */ + virtual bool number_float(number_float_t val, const string_t& s) = 0; + + /*! + @brief a string value was read + @param[in] val string value + @return whether parsing should proceed + @note It is safe to move the passed string value. + */ + virtual bool string(string_t& val) = 0; + + /*! + @brief a binary value was read + @param[in] val binary value + @return whether parsing should proceed + @note It is safe to move the passed binary value. + */ + virtual bool binary(binary_t& val) = 0; + + /*! + @brief the beginning of an object was read + @param[in] elements number of object elements or -1 if unknown + @return whether parsing should proceed + @note binary formats may report the number of elements + */ + virtual bool start_object(std::size_t elements) = 0; + + /*! + @brief an object key was read + @param[in] val object key + @return whether parsing should proceed + @note It is safe to move the passed string. + */ + virtual bool key(string_t& val) = 0; + + /*! + @brief the end of an object was read + @return whether parsing should proceed + */ + virtual bool end_object() = 0; + + /*! + @brief the beginning of an array was read + @param[in] elements number of array elements or -1 if unknown + @return whether parsing should proceed + @note binary formats may report the number of elements + */ + virtual bool start_array(std::size_t elements) = 0; + + /*! + @brief the end of an array was read + @return whether parsing should proceed + */ + virtual bool end_array() = 0; + + /*! 
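decimal_point_position lets get_string() restore '.' in the raw token when a locale-dependent converter used another decimal separator (#4084), so raw float tokens stay valid JSON. A hedged sketch of the locale round-trip this protects; the locale name is platform-dependent and may be absent, hence the guard:

    #include <nlohmann/json.hpp>
    #include <clocale>
    #include <iostream>

    int main() {
        if (std::setlocale(LC_NUMERIC, "de_DE.UTF-8") != nullptr) {
            auto j = nlohmann::json::parse("[1.5]"); // '.' accepted regardless of locale
            std::cout << j.dump() << '\n';           // prints [1.5], not [1,5]
        }
        return 0;
    }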
+ @brief a parse error occurred + @param[in] position the position in the input where the error occurs + @param[in] last_token the last read token + @param[in] ex an exception object describing the error + @return whether parsing should proceed (must return false) + */ + virtual bool parse_error(std::size_t position, + const std::string& last_token, + const detail::exception& ex) = 0; + + json_sax() = default; + json_sax(const json_sax&) = default; + json_sax(json_sax&&) noexcept = default; + json_sax& operator=(const json_sax&) = default; + json_sax& operator=(json_sax&&) noexcept = default; + virtual ~json_sax() = default; +}; + +namespace detail +{ +constexpr std::size_t unknown_size() +{ + return (std::numeric_limits::max)(); +} + +/*! +@brief SAX implementation to create a JSON value from SAX events + +This class implements the @ref json_sax interface and processes the SAX events +to create a JSON value which makes it basically a DOM parser. The structure or +hierarchy of the JSON value is managed by the stack `ref_stack` which contains +a pointer to the respective array or object for each recursion depth. + +After successful parsing, the value that is passed by reference to the +constructor contains the parsed value. + +@tparam BasicJsonType the JSON type +*/ +template +class json_sax_dom_parser +{ + public: + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + using lexer_t = lexer; + + /*! + @param[in,out] r reference to a JSON value that is manipulated while + parsing + @param[in] allow_exceptions_ whether parse errors yield exceptions + */ + explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true, lexer_t* lexer_ = nullptr) + : root(r), allow_exceptions(allow_exceptions_), m_lexer_ref(lexer_) + {} + + // make class move-only + json_sax_dom_parser(const json_sax_dom_parser&) = delete; + json_sax_dom_parser(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) + json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; + json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) + ~json_sax_dom_parser() = default; + + bool null() + { + handle_value(nullptr); + return true; + } + + bool boolean(bool val) + { + handle_value(val); + return true; + } + + bool number_integer(number_integer_t val) + { + handle_value(val); + return true; + } + + bool number_unsigned(number_unsigned_t val) + { + handle_value(val); + return true; + } + + bool number_float(number_float_t val, const string_t& /*unused*/) + { + handle_value(val); + return true; + } + + bool string(string_t& val) + { + handle_value(val); + return true; + } + + bool binary(binary_t& val) + { + handle_value(std::move(val)); + return true; + } + + bool start_object(std::size_t len) + { + ref_stack.push_back(handle_value(BasicJsonType::value_t::object)); + +#if JSON_DIAGNOSTIC_POSITIONS + // Manually set the start position of the object here. + // Ensure this is after the call to handle_value to ensure correct start position. + if (m_lexer_ref) + { + // Lexer has read the first character of the object, so + // subtract 1 from the position to get the correct start position. 
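detail::unknown_size() replaces the scattered static_cast<std::size_t>(-1) sentinels for containers of unknown length, and the re-emitted json_sax class above is the public SAX interface. A minimal DOM-free handler sketch driven by json::sax_parse, counting object keys:

    #include <nlohmann/json.hpp>
    #include <cstddef>
    #include <iostream>
    #include <string>

    using json = nlohmann::json;

    // Overrides every pure virtual of the interface; only key() does work.
    struct key_counter : nlohmann::json_sax<json> {
        std::size_t keys = 0;
        bool null() override { return true; }
        bool boolean(bool) override { return true; }
        bool number_integer(number_integer_t) override { return true; }
        bool number_unsigned(number_unsigned_t) override { return true; }
        bool number_float(number_float_t, const string_t&) override { return true; }
        bool string(string_t&) override { return true; }
        bool binary(binary_t&) override { return true; }
        bool start_object(std::size_t) override { return true; }
        bool key(string_t&) override { ++keys; return true; }
        bool end_object() override { return true; }
        bool start_array(std::size_t) override { return true; }
        bool end_array() override { return true; }
        bool parse_error(std::size_t, const std::string&, const json::exception&) override { return false; }
    };

    int main() {
        key_counter handler;
        json::sax_parse(R"({"a":1,"b":{"c":2}})", &handler);
        std::cout << handler.keys << '\n';  // 3
        return 0;
    }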
+ ref_stack.back()->start_position = m_lexer_ref->get_position() - 1; + } +#endif + + if (JSON_HEDLEY_UNLIKELY(len != detail::unknown_size() && len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back())); + } + + return true; + } + + bool key(string_t& val) + { + JSON_ASSERT(!ref_stack.empty()); + JSON_ASSERT(ref_stack.back()->is_object()); + + // add null at given key and store the reference for later + object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val)); + return true; + } + + bool end_object() + { + JSON_ASSERT(!ref_stack.empty()); + JSON_ASSERT(ref_stack.back()->is_object()); + +#if JSON_DIAGNOSTIC_POSITIONS + if (m_lexer_ref) + { + // Lexer's position is past the closing brace, so set that as the end position. + ref_stack.back()->end_position = m_lexer_ref->get_position(); + } +#endif + + ref_stack.back()->set_parents(); + ref_stack.pop_back(); + return true; + } + + bool start_array(std::size_t len) + { + ref_stack.push_back(handle_value(BasicJsonType::value_t::array)); + +#if JSON_DIAGNOSTIC_POSITIONS + // Manually set the start position of the array here. + // Ensure this is after the call to handle_value to ensure correct start position. + if (m_lexer_ref) + { + ref_stack.back()->start_position = m_lexer_ref->get_position() - 1; + } +#endif + + if (JSON_HEDLEY_UNLIKELY(len != detail::unknown_size() && len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back())); + } + + return true; + } + + bool end_array() + { + JSON_ASSERT(!ref_stack.empty()); + JSON_ASSERT(ref_stack.back()->is_array()); + +#if JSON_DIAGNOSTIC_POSITIONS + if (m_lexer_ref) + { + // Lexer's position is past the closing bracket, so set that as the end position. + ref_stack.back()->end_position = m_lexer_ref->get_position(); + } +#endif + + ref_stack.back()->set_parents(); + ref_stack.pop_back(); + return true; + } + + template + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, + const Exception& ex) + { + errored = true; + static_cast(ex); + if (allow_exceptions) + { + JSON_THROW(ex); + } + return false; + } + + constexpr bool is_errored() const + { + return errored; + } + + private: + +#if JSON_DIAGNOSTIC_POSITIONS + void handle_diagnostic_positions_for_json_value(BasicJsonType& v) + { + if (m_lexer_ref) + { + // Lexer has read past the current field value, so set the end position to the current position. + // The start position will be set below based on the length of the string representation + // of the value. + v.end_position = m_lexer_ref->get_position(); + + switch (v.type()) + { + case value_t::boolean: + { + // 4 and 5 are the string length of "true" and "false" + v.start_position = v.end_position - (v.m_data.m_value.boolean ? 4 : 5); + break; + } + + case value_t::null: + { + // 4 is the string length of "null" + v.start_position = v.end_position - 4; + break; + } + + case value_t::string: + { + // include the length of the quotes, which is 2 + v.start_position = v.end_position - v.m_data.m_value.string->size() - 2; + break; + } + + // As we handle the start and end positions for values created during parsing, + // we do not expect the following value type to be called. Regardless, set the positions + // in case this is created manually or through a different constructor. Exclude from lcov + // since the exact condition of this switch is esoteric. 
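When JSON_DIAGNOSTIC_POSITIONS is defined, the parser above records each value's byte range in the input via the lexer. A sketch, assuming the 3.12.0 start_pos()/end_pos() accessors exposed with that macro:

    // The macro must be defined before including the header in every TU.
    #define JSON_DIAGNOSTIC_POSITIONS 1
    #include <nlohmann/json.hpp>
    #include <iostream>

    int main() {
        const auto j = nlohmann::json::parse(R"({"name": "llama.cpp"})");
        // Byte offsets of the string value in the original input, quotes included.
        std::cout << j["name"].start_pos() << ".." << j["name"].end_pos() << '\n';
        return 0;
    }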
+ // LCOV_EXCL_START + case value_t::discarded: + { + v.end_position = std::string::npos; + v.start_position = v.end_position; + break; + } + // LCOV_EXCL_STOP + case value_t::binary: + case value_t::number_integer: + case value_t::number_unsigned: + case value_t::number_float: + { + v.start_position = v.end_position - m_lexer_ref->get_string().size(); + break; + } + case value_t::object: + case value_t::array: + { + // object and array are handled in start_object() and start_array() handlers + // skip setting the values here. + break; + } + default: // LCOV_EXCL_LINE + // Handle all possible types discretely, default handler should never be reached. + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert,-warnings-as-errors) LCOV_EXCL_LINE + } + } + } +#endif + + /*! + @invariant If the ref stack is empty, then the passed value will be the new + root. + @invariant If the ref stack contains a value, then it is an array or an + object to which we can add elements + */ + template + JSON_HEDLEY_RETURNS_NON_NULL + BasicJsonType* handle_value(Value&& v) + { + if (ref_stack.empty()) + { + root = BasicJsonType(std::forward(v)); + +#if JSON_DIAGNOSTIC_POSITIONS + handle_diagnostic_positions_for_json_value(root); +#endif + + return &root; + } + + JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object()); + + if (ref_stack.back()->is_array()) + { + ref_stack.back()->m_data.m_value.array->emplace_back(std::forward(v)); + +#if JSON_DIAGNOSTIC_POSITIONS + handle_diagnostic_positions_for_json_value(ref_stack.back()->m_data.m_value.array->back()); +#endif + + return &(ref_stack.back()->m_data.m_value.array->back()); + } + + JSON_ASSERT(ref_stack.back()->is_object()); + JSON_ASSERT(object_element); + *object_element = BasicJsonType(std::forward(v)); + +#if JSON_DIAGNOSTIC_POSITIONS + handle_diagnostic_positions_for_json_value(*object_element); +#endif + + return object_element; + } + + /// the parsed JSON value + BasicJsonType& root; + /// stack to model hierarchy of values + std::vector ref_stack {}; + /// helper to hold the reference for the next object element + BasicJsonType* object_element = nullptr; + /// whether a syntax error occurred + bool errored = false; + /// whether to throw exceptions in case of errors + const bool allow_exceptions = true; + /// the lexer reference to obtain the current position + lexer_t* m_lexer_ref = nullptr; +}; + +template +class json_sax_dom_callback_parser +{ + public: + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + using parser_callback_t = typename BasicJsonType::parser_callback_t; + using parse_event_t = typename BasicJsonType::parse_event_t; + using lexer_t = lexer; + + json_sax_dom_callback_parser(BasicJsonType& r, + parser_callback_t cb, + const bool allow_exceptions_ = true, + lexer_t* lexer_ = nullptr) + : root(r), callback(std::move(cb)), allow_exceptions(allow_exceptions_), m_lexer_ref(lexer_) + { + keep_stack.push_back(true); + } + + // make class move-only + json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; + json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) + json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) 
= delete; + json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) + ~json_sax_dom_callback_parser() = default; + + bool null() + { + handle_value(nullptr); + return true; + } + + bool boolean(bool val) + { + handle_value(val); + return true; + } + + bool number_integer(number_integer_t val) + { + handle_value(val); + return true; + } + + bool number_unsigned(number_unsigned_t val) + { + handle_value(val); + return true; + } + + bool number_float(number_float_t val, const string_t& /*unused*/) + { + handle_value(val); + return true; + } + + bool string(string_t& val) + { + handle_value(val); + return true; + } + + bool binary(binary_t& val) + { + handle_value(std::move(val)); + return true; + } + + bool start_object(std::size_t len) + { + // check callback for object start + const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::object_start, discarded); + keep_stack.push_back(keep); + + auto val = handle_value(BasicJsonType::value_t::object, true); + ref_stack.push_back(val.second); + + if (ref_stack.back()) + { + +#if JSON_DIAGNOSTIC_POSITIONS + // Manually set the start position of the object here. + // Ensure this is after the call to handle_value to ensure correct start position. + if (m_lexer_ref) + { + // Lexer has read the first character of the object, so + // subtract 1 from the position to get the correct start position. + ref_stack.back()->start_position = m_lexer_ref->get_position() - 1; + } +#endif + + // check object limit + if (JSON_HEDLEY_UNLIKELY(len != detail::unknown_size() && len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back())); + } + } + return true; + } + + bool key(string_t& val) + { + BasicJsonType k = BasicJsonType(val); + + // check callback for key + const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::key, k); + key_keep_stack.push_back(keep); + + // add discarded value at given key and store the reference for later + if (keep && ref_stack.back()) + { + object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val) = discarded); + } + + return true; + } + + bool end_object() + { + if (ref_stack.back()) + { + if (!callback(static_cast(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back())) + { + // discard object + *ref_stack.back() = discarded; + +#if JSON_DIAGNOSTIC_POSITIONS + // Set start/end positions for discarded object. + handle_diagnostic_positions_for_json_value(*ref_stack.back()); +#endif + } + else + { + +#if JSON_DIAGNOSTIC_POSITIONS + if (m_lexer_ref) + { + // Lexer's position is past the closing brace, so set that as the end position. 
+ ref_stack.back()->end_position = m_lexer_ref->get_position(); + } +#endif + + ref_stack.back()->set_parents(); + } + } + + JSON_ASSERT(!ref_stack.empty()); + JSON_ASSERT(!keep_stack.empty()); + ref_stack.pop_back(); + keep_stack.pop_back(); + + if (!ref_stack.empty() && ref_stack.back() && ref_stack.back()->is_structured()) + { + // remove discarded value + for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it) + { + if (it->is_discarded()) + { + ref_stack.back()->erase(it); + break; + } + } + } + + return true; + } + + bool start_array(std::size_t len) + { + const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::array_start, discarded); + keep_stack.push_back(keep); + + auto val = handle_value(BasicJsonType::value_t::array, true); + ref_stack.push_back(val.second); + + if (ref_stack.back()) + { + +#if JSON_DIAGNOSTIC_POSITIONS + // Manually set the start position of the array here. + // Ensure this is after the call to handle_value to ensure correct start position. + if (m_lexer_ref) + { + // Lexer has read the first character of the array, so + // subtract 1 from the position to get the correct start position. + ref_stack.back()->start_position = m_lexer_ref->get_position() - 1; + } +#endif + + // check array limit + if (JSON_HEDLEY_UNLIKELY(len != detail::unknown_size() && len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back())); + } + } + + return true; + } + + bool end_array() + { + bool keep = true; + + if (ref_stack.back()) + { + keep = callback(static_cast(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back()); + if (keep) + { + +#if JSON_DIAGNOSTIC_POSITIONS + if (m_lexer_ref) + { + // Lexer's position is past the closing bracket, so set that as the end position. + ref_stack.back()->end_position = m_lexer_ref->get_position(); + } +#endif + + ref_stack.back()->set_parents(); + } + else + { + // discard array + *ref_stack.back() = discarded; + +#if JSON_DIAGNOSTIC_POSITIONS + // Set start/end positions for discarded array. + handle_diagnostic_positions_for_json_value(*ref_stack.back()); +#endif + } + } + + JSON_ASSERT(!ref_stack.empty()); + JSON_ASSERT(!keep_stack.empty()); + ref_stack.pop_back(); + keep_stack.pop_back(); + + // remove discarded value + if (!keep && !ref_stack.empty() && ref_stack.back()->is_array()) + { + ref_stack.back()->m_data.m_value.array->pop_back(); + } + + return true; + } + + template + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, + const Exception& ex) + { + errored = true; + static_cast(ex); + if (allow_exceptions) + { + JSON_THROW(ex); + } + return false; + } + + constexpr bool is_errored() const + { + return errored; + } + + private: + +#if JSON_DIAGNOSTIC_POSITIONS + void handle_diagnostic_positions_for_json_value(BasicJsonType& v) + { + if (m_lexer_ref) + { + // Lexer has read past the current field value, so set the end position to the current position. + // The start position will be set below based on the length of the string representation + // of the value. + v.end_position = m_lexer_ref->get_position(); + + switch (v.type()) + { + case value_t::boolean: + { + // 4 and 5 are the string length of "true" and "false" + v.start_position = v.end_position - (v.m_data.m_value.boolean ? 
4 : 5); + break; + } + + case value_t::null: + { + // 4 is the string length of "null" + v.start_position = v.end_position - 4; + break; + } + + case value_t::string: + { + // include the length of the quotes, which is 2 + v.start_position = v.end_position - v.m_data.m_value.string->size() - 2; + break; + } + + case value_t::discarded: + { + v.end_position = std::string::npos; + v.start_position = v.end_position; + break; + } + + case value_t::binary: + case value_t::number_integer: + case value_t::number_unsigned: + case value_t::number_float: + { + v.start_position = v.end_position - m_lexer_ref->get_string().size(); + break; + } + + case value_t::object: + case value_t::array: + { + // object and array are handled in start_object() and start_array() handlers + // skip setting the values here. + break; + } + default: // LCOV_EXCL_LINE + // Handle all possible types discretely, default handler should never be reached. + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert,-warnings-as-errors) LCOV_EXCL_LINE + } + } + } +#endif + + /*! + @param[in] v value to add to the JSON value we build during parsing + @param[in] skip_callback whether we should skip calling the callback + function; this is required after start_array() and + start_object() SAX events, because otherwise we would call the + callback function with an empty array or object, respectively. + + @invariant If the ref stack is empty, then the passed value will be the new + root. + @invariant If the ref stack contains a value, then it is an array or an + object to which we can add elements + + @return pair of boolean (whether value should be kept) and pointer (to the + passed value in the ref_stack hierarchy; nullptr if not kept) + */ + template + std::pair handle_value(Value&& v, const bool skip_callback = false) + { + JSON_ASSERT(!keep_stack.empty()); + + // do not handle this value if we know it would be added to a discarded + // container + if (!keep_stack.back()) + { + return {false, nullptr}; + } + + // create value + auto value = BasicJsonType(std::forward(v)); + +#if JSON_DIAGNOSTIC_POSITIONS + handle_diagnostic_positions_for_json_value(value); +#endif + + // check callback + const bool keep = skip_callback || callback(static_cast(ref_stack.size()), parse_event_t::value, value); + + // do not handle this value if we just learnt it shall be discarded + if (!keep) + { + return {false, nullptr}; + } + + if (ref_stack.empty()) + { + root = std::move(value); + return {true, & root}; + } + + // skip this value if we already decided to skip the parent + // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360) + if (!ref_stack.back()) + { + return {false, nullptr}; + } + + // we now only expect arrays and objects + JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object()); + + // array + if (ref_stack.back()->is_array()) + { + ref_stack.back()->m_data.m_value.array->emplace_back(std::move(value)); + return {true, & (ref_stack.back()->m_data.m_value.array->back())}; + } + + // object + JSON_ASSERT(ref_stack.back()->is_object()); + // check if we should store an element for the current key + JSON_ASSERT(!key_keep_stack.empty()); + const bool store_element = key_keep_stack.back(); + key_keep_stack.pop_back(); + + if (!store_element) + { + return {false, nullptr}; + } + + JSON_ASSERT(object_element); + *object_element = std::move(value); + return {true, object_element}; + } + + /// the parsed JSON value + BasicJsonType& root; + /// stack to model hierarchy of values + 
std::vector ref_stack {}; + /// stack to manage which values to keep + std::vector keep_stack {}; // NOLINT(readability-redundant-member-init) + /// stack to manage which object keys to keep + std::vector key_keep_stack {}; // NOLINT(readability-redundant-member-init) + /// helper to hold the reference for the next object element + BasicJsonType* object_element = nullptr; + /// whether a syntax error occurred + bool errored = false; + /// callback function + const parser_callback_t callback = nullptr; + /// whether to throw exceptions in case of errors + const bool allow_exceptions = true; + /// a discarded value for the callback + BasicJsonType discarded = BasicJsonType::value_t::discarded; + /// the lexer reference to obtain the current position + lexer_t* m_lexer_ref = nullptr; +}; + +template +class json_sax_acceptor +{ + public: + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + + bool null() + { + return true; + } + + bool boolean(bool /*unused*/) + { + return true; + } + + bool number_integer(number_integer_t /*unused*/) + { + return true; + } + + bool number_unsigned(number_unsigned_t /*unused*/) + { + return true; + } + + bool number_float(number_float_t /*unused*/, const string_t& /*unused*/) + { + return true; + } + + bool string(string_t& /*unused*/) + { + return true; + } + + bool binary(binary_t& /*unused*/) + { + return true; + } + + bool start_object(std::size_t /*unused*/ = detail::unknown_size()) + { + return true; + } + + bool key(string_t& /*unused*/) + { + return true; + } + + bool end_object() + { + return true; + } + + bool start_array(std::size_t /*unused*/ = detail::unknown_size()) + { + return true; + } + + bool end_array() + { + return true; + } + + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/) + { + return false; + } +}; + +} // namespace detail +NLOHMANN_JSON_NAMESPACE_END + +// #include + +// #include + // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -9208,7 +9841,7 @@ static inline bool little_endianness(int num = 1) noexcept /*! 
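json_sax_dom_callback_parser backs the json::parse overload that takes a parser_callback_t; returning false from the callback discards the value (or key) currently being built. Sketch:

    #include <nlohmann/json.hpp>
    #include <iostream>

    using json = nlohmann::json;

    int main() {
        json::parser_callback_t cb = [](int /*depth*/, json::parse_event_t event, json& parsed) {
            // Drop the "debug" key, and with it the associated value.
            return !(event == json::parse_event_t::key && parsed == json("debug"));
        };
        const json j = json::parse(R"({"keep": 1, "debug": 2})", cb);
        std::cout << j.dump() << '\n';  // {"keep":1}
        return 0;
    }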
@brief deserialization of CBOR, MessagePack, and UBJSON values */ -template> +template> class binary_reader { using number_integer_t = typename BasicJsonType::number_integer_t; @@ -9315,7 +9948,7 @@ class binary_reader std::int32_t document_size{}; get_number(input_format_t::bson, document_size); - if (JSON_HEDLEY_UNLIKELY(!sax->start_object(static_cast(-1)))) + if (JSON_HEDLEY_UNLIKELY(!sax->start_object(detail::unknown_size()))) { return false; } @@ -9471,6 +10104,12 @@ class binary_reader return get_number(input_format_t::bson, value) && sax->number_integer(value); } + case 0x11: // uint64 + { + std::uint64_t value{}; + return get_number(input_format_t::bson, value) && sax->number_unsigned(value); + } + default: // anything else not supported (yet) { std::array cr{{}}; @@ -9537,7 +10176,7 @@ class binary_reader std::int32_t document_size{}; get_number(input_format_t::bson, document_size); - if (JSON_HEDLEY_UNLIKELY(!sax->start_array(static_cast(-1)))) + if (JSON_HEDLEY_UNLIKELY(!sax->start_array(detail::unknown_size()))) { return false; } @@ -9797,7 +10436,7 @@ class binary_reader } case 0x9F: // array (indefinite length) - return get_cbor_array(static_cast(-1), tag_handler); + return get_cbor_array(detail::unknown_size(), tag_handler); // map (0x00..0x17 pairs of data items follow) case 0xA0: @@ -9851,7 +10490,7 @@ class binary_reader } case 0xBF: // map (indefinite length) - return get_cbor_object(static_cast(-1), tag_handler); + return get_cbor_object(detail::unknown_size(), tag_handler); case 0xC6: // tagged item case 0xC7: @@ -10239,7 +10878,7 @@ class binary_reader } /*! - @param[in] len the length of the array or static_cast(-1) for an + @param[in] len the length of the array or detail::unknown_size() for an array of indefinite size @param[in] tag_handler how CBOR tags should be treated @return whether array creation completed @@ -10252,7 +10891,7 @@ class binary_reader return false; } - if (len != static_cast(-1)) + if (len != detail::unknown_size()) { for (std::size_t i = 0; i < len; ++i) { @@ -10277,7 +10916,7 @@ class binary_reader } /*! 
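The binary reader now maps BSON element type 0x11 (uint64 timestamp) to number_unsigned instead of rejecting it. A sketch with a hand-assembled BSON document; the byte layout follows the BSON spec (int32 total size, typed elements, trailing 0x00):

    #include <nlohmann/json.hpp>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
        const std::vector<std::uint8_t> bson = {
            0x10, 0x00, 0x00, 0x00,             // document size: 16 bytes
            0x11, 't', 0x00,                    // element type 0x11 (uint64), key "t"
            0x05, 0, 0, 0, 0, 0, 0, 0,          // uint64, little-endian 5
            0x00                                // end of document
        };
        const auto j = nlohmann::json::from_bson(bson);
        std::cout << j.dump() << '\n';  // {"t":5}
        return 0;
    }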
- @param[in] len the length of the object or static_cast(-1) for an + @param[in] len the length of the object or detail::unknown_size() for an object of indefinite size @param[in] tag_handler how CBOR tags should be treated @return whether object creation completed @@ -10293,7 +10932,7 @@ class binary_reader if (len != 0) { string_t key; - if (len != static_cast(-1)) + if (len != detail::unknown_size()) { for (std::size_t i = 0; i < len; ++i) { @@ -11456,6 +12095,16 @@ class binary_reader case 'Z': // null return sax->null(); + case 'B': // byte + { + if (input_format != input_format_t::bjdata) + { + break; + } + std::uint8_t number{}; + return get_number(input_format, number) && sax->number_unsigned(number); + } + case 'U': { std::uint8_t number{}; @@ -11656,7 +12305,7 @@ class binary_reader return false; } - if (size_and_type.second == 'C') + if (size_and_type.second == 'C' || size_and_type.second == 'B') { size_and_type.second = 'U'; } @@ -11678,6 +12327,13 @@ class binary_reader return (sax->end_array() && sax->end_object()); } + // If BJData type marker is 'B' decode as binary + if (input_format == input_format_t::bjdata && size_and_type.first != npos && size_and_type.second == 'B') + { + binary_t result; + return get_binary(input_format, size_and_type.first, result) && sax->binary(result); + } + if (size_and_type.first != npos) { if (JSON_HEDLEY_UNLIKELY(!sax->start_array(size_and_type.first))) @@ -11711,7 +12367,7 @@ class binary_reader } else { - if (JSON_HEDLEY_UNLIKELY(!sax->start_array(static_cast(-1)))) + if (JSON_HEDLEY_UNLIKELY(!sax->start_array(detail::unknown_size()))) { return false; } @@ -11789,7 +12445,7 @@ class binary_reader } else { - if (JSON_HEDLEY_UNLIKELY(!sax->start_object(static_cast(-1)))) + if (JSON_HEDLEY_UNLIKELY(!sax->start_object(detail::unknown_size()))) { return false; } @@ -11900,6 +12556,29 @@ class binary_reader return current = ia.get_character(); } + /*! + @brief get_to read into a primitive type + + This function provides the interface to the used input adapter. It does + not throw in case the input reached EOF, but returns false instead + + @return bool, whether the read was successful + */ + template + bool get_to(T& dest, const input_format_t format, const char* context) + { + auto new_chars_read = ia.get_elements(&dest); + chars_read += new_chars_read; + if (JSON_HEDLEY_UNLIKELY(new_chars_read < sizeof(T))) + { + // in case of failure, advance position by 1 to report failing location + ++chars_read; + sax->parse_error(chars_read, "", parse_error::create(110, chars_read, exception_message(format, "unexpected end of input", context), nullptr)); + return false; + } + return true; + } + /*! 
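BJData gains the 'B' ("byte") marker: standalone it decodes as an unsigned 8-bit number, and as a typed-array type ($B) the payload is decoded as binary. A sketch of the standalone case:

    #include <nlohmann/json.hpp>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
        const std::vector<std::uint8_t> bjdata = { 'B', 0x7F };  // marker + payload
        const auto j = nlohmann::json::from_bjdata(bjdata);
        std::cout << j << '\n';  // 127
        return 0;
    }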
@return character read from the input after ignoring all 'N' entries */ @@ -11914,6 +12593,28 @@ class binary_reader return current; } + template + static void byte_swap(NumberType& number) + { + constexpr std::size_t sz = sizeof(number); +#ifdef __cpp_lib_byteswap + if constexpr (sz == 1) + { + return; + } + if constexpr(std::is_integral_v) + { + number = std::byteswap(number); + return; + } +#endif + auto* ptr = reinterpret_cast(&number); + for (std::size_t i = 0; i < sz / 2; ++i) + { + std::swap(ptr[i], ptr[sz - i - 1]); + } + } + /* @brief read a number from the input @@ -11932,29 +12633,16 @@ class binary_reader template bool get_number(const input_format_t format, NumberType& result) { - // step 1: read input into array with system's byte order - std::array vec{}; - for (std::size_t i = 0; i < sizeof(NumberType); ++i) + // read in the original format + + if (JSON_HEDLEY_UNLIKELY(!get_to(result, format, "number"))) { - get(); - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "number"))) - { - return false; - } - - // reverse byte order prior to conversion if necessary - if (is_little_endian != (InputIsLittleEndian || format == input_format_t::bjdata)) - { - vec[sizeof(NumberType) - i - 1] = static_cast(current); - } - else - { - vec[i] = static_cast(current); // LCOV_EXCL_LINE - } + return false; + } + if (is_little_endian != (InputIsLittleEndian || format == input_format_t::bjdata)) + { + byte_swap(result); } - - // step 2: convert array into number of type T and return - std::memcpy(&result, vec.data(), sizeof(NumberType)); return true; } @@ -12093,7 +12781,7 @@ class binary_reader } private: - static JSON_INLINE_VARIABLE constexpr std::size_t npos = static_cast(-1); + static JSON_INLINE_VARIABLE constexpr std::size_t npos = detail::unknown_size(); /// input adapter InputAdapterType ia; @@ -12119,6 +12807,7 @@ class binary_reader #define JSON_BINARY_READER_MAKE_BJD_TYPES_MAP_ \ make_array( \ + bjd_type{'B', "byte"}, \ bjd_type{'C', "char"}, \ bjd_type{'D', "double"}, \ bjd_type{'I', "int16"}, \ @@ -12161,10 +12850,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -12238,10 +12927,10 @@ class parser public: /// a parser reading from an input adapter explicit parser(InputAdapterType&& adapter, - const parser_callback_t cb = nullptr, + parser_callback_t cb = nullptr, const bool allow_exceptions_ = true, const bool skip_comments = false) - : callback(cb) + : callback(std::move(cb)) , m_lexer(std::move(adapter), skip_comments) , allow_exceptions(allow_exceptions_) { @@ -12263,7 +12952,7 @@ class parser { if (callback) { - json_sax_dom_callback_parser sdp(result, callback, allow_exceptions); + json_sax_dom_callback_parser sdp(result, callback, allow_exceptions, &m_lexer); sax_parse_internal(&sdp); // in strict mode, input must be completely read @@ -12291,7 +12980,7 @@ class parser } else { - json_sax_dom_parser sdp(result, allow_exceptions); + json_sax_dom_parser sdp(result, allow_exceptions, &m_lexer); sax_parse_internal(&sdp); // in strict mode, input must be completely read @@ -12363,7 +13052,7 @@ class parser { case token_type::begin_object: { - if (JSON_HEDLEY_UNLIKELY(!sax->start_object(static_cast(-1)))) + if 
(JSON_HEDLEY_UNLIKELY(!sax->start_object(detail::unknown_size()))) { return false; } @@ -12408,7 +13097,7 @@ class parser case token_type::begin_array: { - if (JSON_HEDLEY_UNLIKELY(!sax->start_array(static_cast(-1)))) + if (JSON_HEDLEY_UNLIKELY(!sax->start_array(detail::unknown_size()))) { return false; } @@ -12690,10 +13379,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -12703,10 +13392,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -12862,10 +13551,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -13332,7 +14021,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. */ template < typename IterImpl, detail::enable_if_t < (std::is_same::value || std::is_same::value), std::nullptr_t > = nullptr > bool operator==(const IterImpl& other) const @@ -13343,7 +14032,11 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", m_object)); } - JSON_ASSERT(m_object != nullptr); + // value-initialized forward iterators can be compared, and must compare equal to other value-initialized iterators of the same type #4493 + if (m_object == nullptr) + { + return true; + } switch (m_object->m_data.m_type) { @@ -13368,7 +14061,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: not equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. */ template < typename IterImpl, detail::enable_if_t < (std::is_same::value || std::is_same::value), std::nullptr_t > = nullptr > bool operator!=(const IterImpl& other) const @@ -13378,7 +14071,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: smaller - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. 
*/ bool operator<(const iter_impl& other) const { @@ -13388,7 +14081,12 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", m_object)); } - JSON_ASSERT(m_object != nullptr); + // value-initialized forward iterators can be compared, and must compare equal to other value-initialized iterators of the same type #4493 + if (m_object == nullptr) + { + // the iterators are both value-initialized and are to be considered equal, but this function checks for smaller, so we return false + return false; + } switch (m_object->m_data.m_type) { @@ -13413,7 +14111,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: less than or equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. */ bool operator<=(const iter_impl& other) const { @@ -13422,7 +14120,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: greater than - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. */ bool operator>(const iter_impl& other) const { @@ -13431,7 +14129,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: greater than or equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) The iterator is initialized; i.e. `m_object != nullptr`, or (2) both iterators are value-initialized. */ bool operator>=(const iter_impl& other) const { @@ -13624,10 +14322,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -13759,10 +14457,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -13801,10 +14499,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -14034,7 +14732,7 @@ class json_pointer } const char* p = s.c_str(); - char* p_end = nullptr; + char* p_end = nullptr; // NOLINT(misc-const-correctness) errno = 0; // strtoull doesn't reset errno const unsigned long long res = std::strtoull(p, &p_end, 10); // NOLINT(runtime/int) if (p == p_end // invalid input or empty string @@ -14556,7 +15254,7 @@ class json_pointer // iterate array and use index as reference string for (std::size_t i = 0; i < value.m_data.m_value.array->size(); ++i) { - 
flatten(detail::concat(reference_string, '/', std::to_string(i)), + flatten(detail::concat(reference_string, '/', std::to_string(i)), value.m_data.m_value.array->operator[](i), result); } } @@ -14575,7 +15273,7 @@ class json_pointer // iterate object and use keys as reference string for (const auto& element : *value.m_data.m_value.object) { - flatten(detail::concat(reference_string, '/', detail::escape(element.first)), element.second, result); + flatten(detail::concat(reference_string, '/', detail::escape(element.first)), element.second, result); } } break; @@ -14796,10 +15494,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -14881,6 +15579,8 @@ NLOHMANN_JSON_NAMESPACE_END // #include +// #include + // #include // #include @@ -14888,10 +15588,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -14914,10 +15614,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -15068,6 +15768,13 @@ NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { +/// how to encode BJData +enum class bjdata_version_t +{ + draft2, + draft3, +}; + /////////////////// // binary writer // /////////////////// @@ -15652,7 +16359,7 @@ class binary_writer case value_t::binary: { // step 0: determine if the binary type has a set subtype to - // determine whether or not to use the ext or fixext types + // determine whether to use the ext or fixext types const bool use_ext = j.m_data.m_value.binary->has_subtype(); // step 1: write control byte and the byte string length @@ -15775,11 +16482,14 @@ class binary_writer @param[in] use_type whether to use '$' prefixes (optimized format) @param[in] add_prefix whether prefixes need to be used for this value @param[in] use_bjdata whether write in BJData format, default is false + @param[in] bjdata_version which BJData version to use, default is draft2 */ void write_ubjson(const BasicJsonType& j, const bool use_count, const bool use_type, const bool add_prefix = true, - const bool use_bjdata = false) + const bool use_bjdata = false, const bjdata_version_t bjdata_version = bjdata_version_t::draft2) { + const bool bjdata_draft3 = use_bjdata && bjdata_version == bjdata_version_t::draft3; + switch (j.type()) { case value_t::null: @@ -15869,7 +16579,7 @@ class binary_writer for (const auto& el : *j.m_data.m_value.array) { - write_ubjson(el, use_count, use_type, prefix_required, use_bjdata); + write_ubjson(el, use_count, use_type, prefix_required, use_bjdata, bjdata_version); } if (!use_count) @@ -15887,11 +16597,11 @@ class binary_writer oa->write_character(to_char_type('[')); } - if 
(use_type && !j.m_data.m_value.binary->empty()) + if (use_type && (bjdata_draft3 || !j.m_data.m_value.binary->empty())) { JSON_ASSERT(use_count); oa->write_character(to_char_type('$')); - oa->write_character('U'); + oa->write_character(bjdata_draft3 ? 'B' : 'U'); } if (use_count) @@ -15910,7 +16620,7 @@ class binary_writer { for (size_t i = 0; i < j.m_data.m_value.binary->size(); ++i) { - oa->write_character(to_char_type('U')); + oa->write_character(to_char_type(bjdata_draft3 ? 'B' : 'U')); oa->write_character(j.m_data.m_value.binary->data()[i]); } } @@ -15927,7 +16637,7 @@ class binary_writer { if (use_bjdata && j.m_data.m_value.object->size() == 3 && j.m_data.m_value.object->find("_ArrayType_") != j.m_data.m_value.object->end() && j.m_data.m_value.object->find("_ArraySize_") != j.m_data.m_value.object->end() && j.m_data.m_value.object->find("_ArrayData_") != j.m_data.m_value.object->end()) { - if (!write_bjdata_ndarray(*j.m_data.m_value.object, use_count, use_type)) // decode bjdata ndarray in the JData format (https://github.com/NeuroJSON/jdata) + if (!write_bjdata_ndarray(*j.m_data.m_value.object, use_count, use_type, bjdata_version)) // decode bjdata ndarray in the JData format (https://github.com/NeuroJSON/jdata) { break; } @@ -15971,7 +16681,7 @@ class binary_writer oa->write_characters( reinterpret_cast(el.first.c_str()), el.first.size()); - write_ubjson(el.second, use_count, use_type, prefix_required, use_bjdata); + write_ubjson(el.second, use_count, use_type, prefix_required, use_bjdata, bjdata_version); } if (!use_count) @@ -16127,7 +16837,8 @@ class binary_writer } else { - JSON_THROW(out_of_range::create(407, concat("integer number ", std::to_string(j.m_data.m_value.number_unsigned), " cannot be represented by BSON as it does not fit int64"), &j)); + write_bson_entry_header(name, 0x11 /* uint64 */); + write_number(static_cast(j.m_data.m_value.number_unsigned), true); } } @@ -16655,10 +17366,11 @@ class binary_writer /*! 
@return false if the object is successfully converted to a bjdata ndarray, true if the type or size is invalid */ - bool write_bjdata_ndarray(const typename BasicJsonType::object_t& value, const bool use_count, const bool use_type) + bool write_bjdata_ndarray(const typename BasicJsonType::object_t& value, const bool use_count, const bool use_type, const bjdata_version_t bjdata_version) { std::map bjdtype = {{"uint8", 'U'}, {"int8", 'i'}, {"uint16", 'u'}, {"int16", 'I'}, - {"uint32", 'm'}, {"int32", 'l'}, {"uint64", 'M'}, {"int64", 'L'}, {"single", 'd'}, {"double", 'D'}, {"char", 'C'} + {"uint32", 'm'}, {"int32", 'l'}, {"uint64", 'M'}, {"int64", 'L'}, {"single", 'd'}, {"double", 'D'}, + {"char", 'C'}, {"byte", 'B'} }; string_t key = "_ArrayType_"; @@ -16688,10 +17400,10 @@ class binary_writer oa->write_character('#'); key = "_ArraySize_"; - write_ubjson(value.at(key), use_count, use_type, true, true); + write_ubjson(value.at(key), use_count, use_type, true, true, bjdata_version); key = "_ArrayData_"; - if (dtype == 'U' || dtype == 'C') + if (dtype == 'U' || dtype == 'C' || dtype == 'B') { for (const auto& el : value.at(key)) { @@ -16882,11 +17594,11 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2008-2009 Björn Hoehrmann -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2008 - 2009 Björn Hoehrmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -16907,11 +17619,11 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2009 Florian Loitsch -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -17147,10 +17859,10 @@ boundaries compute_boundaries(FloatType value) // v- m- v m+ v+ const bool lower_boundary_is_closer = F == 0 && E > 1; - const diyfp m_plus = diyfp(2 * v.f + 1, v.e - 1); + const diyfp m_plus = diyfp((2 * v.f) + 1, v.e - 1); const diyfp m_minus = lower_boundary_is_closer - ? diyfp(4 * v.f - 1, v.e - 2) // (B) - : diyfp(2 * v.f - 1, v.e - 1); // (A) + ? diyfp((4 * v.f) - 1, v.e - 2) // (B) + : diyfp((2 * v.f) - 1, v.e - 1); // (A) // Determine the normalized w+ = m+. 
const diyfp w_plus = diyfp::normalize(m_plus); @@ -17380,7 +18092,7 @@ inline cached_power get_cached_power_for_binary_exponent(int e) JSON_ASSERT(e >= -1500); JSON_ASSERT(e <= 1500); const int f = kAlpha - e - 1; - const int k = (f * 78913) / (1 << 18) + static_cast(f > 0); + const int k = ((f * 78913) / (1 << 18)) + static_cast(f > 0); const int index = (-kCachedPowersMinDecExp + k + (kCachedPowersDecStep - 1)) / kCachedPowersDecStep; JSON_ASSERT(index >= 0); @@ -17858,15 +18570,15 @@ inline char* append_exponent(char* buf, int e) } else if (k < 100) { - *buf++ = static_cast('0' + k / 10); + *buf++ = static_cast('0' + (k / 10)); k %= 10; *buf++ = static_cast('0' + k); } else { - *buf++ = static_cast('0' + k / 100); + *buf++ = static_cast('0' + (k / 100)); k %= 100; - *buf++ = static_cast('0' + k / 10); + *buf++ = static_cast('0' + (k / 10)); k %= 10; *buf++ = static_cast('0' + k); } @@ -18652,7 +19364,7 @@ class serializer @param[in] x unsigned integer number to count its digits @return number of decimal digits */ - inline unsigned int count_digits(number_unsigned_t x) noexcept + unsigned int count_digits(number_unsigned_t x) noexcept { unsigned int n_digits = 1; for (;;) @@ -18935,7 +19647,7 @@ class serializer ? (byte & 0x3fu) | (codep << 6u) : (0xFFu >> type) & (byte); - const std::size_t index = 256u + static_cast(state) * 16u + static_cast(type); + const std::size_t index = 256u + (static_cast(state) * 16u) + static_cast(type); JSON_ASSERT(index < utf8d.size()); state = utf8d[index]; return state; @@ -18961,7 +19673,7 @@ class serializer * absolute values of INT_MIN and INT_MAX are usually not the same. See * #1708 for details. */ - inline number_unsigned_t remove_sign(number_integer_t x) noexcept + number_unsigned_t remove_sign(number_integer_t x) noexcept { JSON_ASSERT(x < 0 && x < (std::numeric_limits::max)()); // NOLINT(misc-redundant-expression) return static_cast(-(x + 1)) + 1; @@ -19003,10 +19715,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -19031,7 +19743,7 @@ NLOHMANN_JSON_NAMESPACE_BEGIN /// for use within nlohmann::basic_json template , class Allocator = std::allocator>> - struct ordered_map : std::vector, Allocator> + struct ordered_map : std::vector, Allocator> { using key_type = Key; using mapped_type = T; @@ -19346,7 +20058,7 @@ template , template using require_input_iter = typename std::enable_if::iterator_category, - std::input_iterator_tag>::value>::type; + std::input_iterator_tag>::value>::type; template> void insert(InputIt first, InputIt last) @@ -19417,9 +20129,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec friend class ::nlohmann::detail::binary_writer; template friend class ::nlohmann::detail::binary_reader; - template + template friend class ::nlohmann::detail::json_sax_dom_parser; - template + template friend class ::nlohmann::detail::json_sax_dom_callback_parser; friend class ::nlohmann::detail::exception; @@ -19440,7 +20152,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec ) { return ::nlohmann::detail::parser(std::move(adapter), - std::move(cb), allow_exceptions, ignore_comments); + std::move(cb), allow_exceptions, ignore_comments); } 
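// ---------------------------------------------------------------------------
// [editor's note: illustration, not part of the upstream diff] A change that
// recurs throughout this 3.12.0 update is the replacement of the bare sentinel
// static_cast<std::size_t>(-1) with the named helper detail::unknown_size() in
// the SAX start_object()/start_array() calls and in binary_reader::npos. A
// minimal stand-alone sketch of the idiom, assuming only that the helper
// returns the same all-ones value the old cast produced:
//
//     #include <cstddef>
//     #include <limits>
//
//     // same value as static_cast<std::size_t>(-1), but the name states intent
//     constexpr std::size_t unknown_size()
//     {
//         return (std::numeric_limits<std::size_t>::max)();
//     }
//
//     // a SAX consumer can distinguish "size known up front" from "indefinite"
//     bool start_array(std::size_t len)
//     {
//         if (len == unknown_size())
//         {
//             return true; // e.g. CBOR 0x9F: indefinite length, grow dynamically
//         }
//         return len <= 1024; // size known up front: validate or reserve ahead of time
//     }
// ---------------------------------------------------------------------------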
private: @@ -19473,6 +20185,8 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec using error_handler_t = detail::error_handler_t; /// how to treat CBOR tags using cbor_tag_handler_t = detail::cbor_tag_handler_t; + /// how to encode BJData + using bjdata_version_t = detail::bjdata_version_t; /// helper type for initializer lists of basic_json values using initializer_list_t = std::initializer_list>; @@ -19552,7 +20266,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec { basic_json result; - result["copyright"] = "(C) 2013-2023 Niels Lohmann"; + result["copyright"] = "(C) 2013-2025 Niels Lohmann"; result["name"] = "JSON for Modern C++"; result["url"] = "https://github.com/nlohmann/json"; result["version"]["string"] = @@ -19817,7 +20531,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec object = nullptr; // silence warning, see #821 if (JSON_HEDLEY_UNLIKELY(t == value_t::null)) { - JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.11.3", nullptr)); // LCOV_EXCL_LINE + JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.12.0", nullptr)); // LCOV_EXCL_LINE } break; } @@ -20053,10 +20767,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec return it; } - reference set_parent(reference j, std::size_t old_capacity = static_cast(-1)) + reference set_parent(reference j, std::size_t old_capacity = detail::unknown_size()) { #if JSON_DIAGNOSTICS - if (old_capacity != static_cast(-1)) + if (old_capacity != detail::unknown_size()) { // see https://github.com/nlohmann/json/issues/2838 JSON_ASSERT(type() == value_t::array); @@ -20136,8 +20850,8 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::enable_if_t < !detail::is_basic_json::value && detail::is_compatible_type::value, int > = 0 > basic_json(CompatibleType && val) noexcept(noexcept( // NOLINT(bugprone-forwarding-reference-overload,bugprone-exception-escape) - JSONSerializer::to_json(std::declval(), - std::forward(val)))) + JSONSerializer::to_json(std::declval(), + std::forward(val)))) { JSONSerializer::to_json(*this, std::forward(val)); set_parents(); @@ -20150,6 +20864,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::enable_if_t < detail::is_basic_json::value&& !std::is_same::value, int > = 0 > basic_json(const BasicJsonType& val) +#if JSON_DIAGNOSTIC_POSITIONS + : start_position(val.start_pos()), + end_position(val.end_pos()) +#endif { using other_boolean_t = typename BasicJsonType::boolean_t; using other_number_float_t = typename BasicJsonType::number_float_t; @@ -20196,6 +20914,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } JSON_ASSERT(m_data.m_type == val.type()); + set_parents(); assert_invariant(); } @@ -20332,7 +21051,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec template < class InputIT, typename std::enable_if < std::is_same::value || std::is_same::value, int >::type = 0 > - basic_json(InputIT first, InputIT last) + basic_json(InputIT first, InputIT last) // NOLINT(performance-unnecessary-value-param) { JSON_ASSERT(first.m_object != nullptr); JSON_ASSERT(last.m_object != nullptr); @@ -20447,6 +21166,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// 
@sa https://json.nlohmann.me/api/basic_json/basic_json/ basic_json(const basic_json& other) : json_base_class_t(other) +#if JSON_DIAGNOSTIC_POSITIONS + , start_position(other.start_position) + , end_position(other.end_position) +#endif { m_data.m_type = other.m_data.m_type; // check of passed value is valid @@ -20516,15 +21239,24 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @sa https://json.nlohmann.me/api/basic_json/basic_json/ basic_json(basic_json&& other) noexcept : json_base_class_t(std::forward(other)), - m_data(std::move(other.m_data)) + m_data(std::move(other.m_data)) // cppcheck-suppress[accessForwarded] TODO check +#if JSON_DIAGNOSTIC_POSITIONS + , start_position(other.start_position) // cppcheck-suppress[accessForwarded] TODO check + , end_position(other.end_position) // cppcheck-suppress[accessForwarded] TODO check +#endif { // check that passed value is valid - other.assert_invariant(false); + other.assert_invariant(false); // cppcheck-suppress[accessForwarded] // invalidate payload other.m_data.m_type = value_t::null; other.m_data.m_value = {}; +#if JSON_DIAGNOSTIC_POSITIONS + other.start_position = std::string::npos; + other.end_position = std::string::npos; +#endif + set_parents(); assert_invariant(); } @@ -20545,6 +21277,12 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec using std::swap; swap(m_data.m_type, other.m_data.m_type); swap(m_data.m_value, other.m_data.m_value); + +#if JSON_DIAGNOSTIC_POSITIONS + swap(start_position, other.start_position); + swap(end_position, other.end_position); +#endif + json_base_class_t::operator=(std::move(other)); set_parents(); @@ -20766,13 +21504,13 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// get a pointer to the value (integer number) number_integer_t* get_impl_ptr(number_integer_t* /*unused*/) noexcept { - return is_number_integer() ? &m_data.m_value.number_integer : nullptr; + return m_data.m_type == value_t::number_integer ? &m_data.m_value.number_integer : nullptr; } /// get a pointer to the value (integer number) constexpr const number_integer_t* get_impl_ptr(const number_integer_t* /*unused*/) const noexcept { - return is_number_integer() ? &m_data.m_value.number_integer : nullptr; + return m_data.m_type == value_t::number_integer ? 
&m_data.m_value.number_integer : nullptr; } /// get a pointer to the value (unsigned number) @@ -20907,7 +21645,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::has_from_json::value, int > = 0 > ValueType get_impl(detail::priority_tag<0> /*unused*/) const noexcept(noexcept( - JSONSerializer::from_json(std::declval(), std::declval()))) + JSONSerializer::from_json(std::declval(), std::declval()))) { auto ret = ValueType(); JSONSerializer::from_json(*this, ret); @@ -20949,7 +21687,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::has_non_default_from_json::value, int > = 0 > ValueType get_impl(detail::priority_tag<1> /*unused*/) const noexcept(noexcept( - JSONSerializer::from_json(std::declval()))) + JSONSerializer::from_json(std::declval()))) { return JSONSerializer::from_json(*this); } @@ -21099,7 +21837,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::has_from_json::value, int > = 0 > ValueType & get_to(ValueType& v) const noexcept(noexcept( - JSONSerializer::from_json(std::declval(), v))) + JSONSerializer::from_json(std::declval(), v))) { JSONSerializer::from_json(*this, v); return v; @@ -21251,7 +21989,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec { // create better exception explanation JSON_THROW(out_of_range::create(401, detail::concat("array index ", std::to_string(idx), " is out of range"), this)); - } + } // cppcheck-suppress[missingReturn] } else { @@ -21274,7 +22012,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec { // create better exception explanation JSON_THROW(out_of_range::create(401, detail::concat("array index ", std::to_string(idx), " is out of range"), this)); - } + } // cppcheck-suppress[missingReturn] } else { @@ -21419,7 +22157,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief access specified object element /// @sa https://json.nlohmann.me/api/basic_json/operator%5B%5D/ - reference operator[](typename object_t::key_type key) + reference operator[](typename object_t::key_type key) // NOLINT(performance-unnecessary-value-param) { // implicitly convert null value to an empty object if (is_null()) @@ -21729,7 +22467,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec template < class IteratorType, detail::enable_if_t < std::is_same::value || std::is_same::value, int > = 0 > - IteratorType erase(IteratorType pos) + IteratorType erase(IteratorType pos) // NOLINT(performance-unnecessary-value-param) { // make sure iterator fits the current value if (JSON_HEDLEY_UNLIKELY(this != pos.m_object)) @@ -21799,7 +22537,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec template < class IteratorType, detail::enable_if_t < std::is_same::value || std::is_same::value, int > = 0 > - IteratorType erase(IteratorType first, IteratorType last) + IteratorType erase(IteratorType first, IteratorType last) // NOLINT(performance-unnecessary-value-param) { // make sure iterator fits the current value if (JSON_HEDLEY_UNLIKELY(this != first.m_object || this != last.m_object)) @@ -22566,7 +23304,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @note: This uses std::distance to support GCC 4.8, /// see https://github.com/nlohmann/json/pull/1257 template - iterator insert_iterator(const_iterator pos, Args&& ... 
args) + iterator insert_iterator(const_iterator pos, Args&& ... args) // NOLINT(performance-unnecessary-value-param) { iterator result(this); JSON_ASSERT(m_data.m_value.array != nullptr); @@ -22585,7 +23323,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts element into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, const basic_json& val) + iterator insert(const_iterator pos, const basic_json& val) // NOLINT(performance-unnecessary-value-param) { // insert only works for arrays if (JSON_HEDLEY_LIKELY(is_array())) @@ -22605,14 +23343,14 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts element into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, basic_json&& val) + iterator insert(const_iterator pos, basic_json&& val) // NOLINT(performance-unnecessary-value-param) { return insert(pos, val); } /// @brief inserts copies of element into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, size_type cnt, const basic_json& val) + iterator insert(const_iterator pos, size_type cnt, const basic_json& val) // NOLINT(performance-unnecessary-value-param) { // insert only works for arrays if (JSON_HEDLEY_LIKELY(is_array())) @@ -22632,7 +23370,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts range of elements into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, const_iterator first, const_iterator last) + iterator insert(const_iterator pos, const_iterator first, const_iterator last) // NOLINT(performance-unnecessary-value-param) { // insert only works for arrays if (JSON_HEDLEY_UNLIKELY(!is_array())) @@ -22663,7 +23401,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts elements from initializer list into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, initializer_list_t ilist) + iterator insert(const_iterator pos, initializer_list_t ilist) // NOLINT(performance-unnecessary-value-param) { // insert only works for arrays if (JSON_HEDLEY_UNLIKELY(!is_array())) @@ -22683,7 +23421,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts range of elements into object /// @sa https://json.nlohmann.me/api/basic_json/insert/ - void insert(const_iterator first, const_iterator last) + void insert(const_iterator first, const_iterator last) // NOLINT(performance-unnecessary-value-param) { // insert only works for objects if (JSON_HEDLEY_UNLIKELY(!is_object())) @@ -22704,6 +23442,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec } m_data.m_value.object->insert(first.m_it.object_iterator, last.m_it.object_iterator); + set_parents(); } /// @brief updates a JSON object from another object, overwriting existing keys @@ -22715,7 +23454,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief updates a JSON object from another object, overwriting existing keys /// @sa https://json.nlohmann.me/api/basic_json/update/ - void update(const_iterator first, const_iterator last, bool merge_objects = false) + void update(const_iterator first, const_iterator last, bool merge_objects = false) // NOLINT(performance-unnecessary-value-param) { // 
implicitly convert null value to an empty object if (is_null()) @@ -23316,12 +24055,12 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec template JSON_HEDLEY_WARN_UNUSED_RESULT static basic_json parse(InputType&& i, - const parser_callback_t cb = nullptr, + parser_callback_t cb = nullptr, const bool allow_exceptions = true, const bool ignore_comments = false) { basic_json result; - parser(detail::input_adapter(std::forward(i)), cb, allow_exceptions, ignore_comments).parse(true, result); + parser(detail::input_adapter(std::forward(i)), std::move(cb), allow_exceptions, ignore_comments).parse(true, result); // cppcheck-suppress[accessMoved,accessForwarded] return result; } @@ -23331,24 +24070,24 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec JSON_HEDLEY_WARN_UNUSED_RESULT static basic_json parse(IteratorType first, IteratorType last, - const parser_callback_t cb = nullptr, + parser_callback_t cb = nullptr, const bool allow_exceptions = true, const bool ignore_comments = false) { basic_json result; - parser(detail::input_adapter(std::move(first), std::move(last)), cb, allow_exceptions, ignore_comments).parse(true, result); + parser(detail::input_adapter(std::move(first), std::move(last)), std::move(cb), allow_exceptions, ignore_comments).parse(true, result); // cppcheck-suppress[accessMoved] return result; } JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DEPRECATED_FOR(3.8.0, parse(ptr, ptr + len)) static basic_json parse(detail::span_input_adapter&& i, - const parser_callback_t cb = nullptr, + parser_callback_t cb = nullptr, const bool allow_exceptions = true, const bool ignore_comments = false) { basic_json result; - parser(i.get(), cb, allow_exceptions, ignore_comments).parse(true, result); + parser(i.get(), std::move(cb), allow_exceptions, ignore_comments).parse(true, result); // cppcheck-suppress[accessMoved] return result; } @@ -23527,6 +24266,23 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec basic_json* m_parent = nullptr; #endif +#if JSON_DIAGNOSTIC_POSITIONS + /// the start position of the value + std::size_t start_position = std::string::npos; + /// the end position of the value + std::size_t end_position = std::string::npos; + public: + constexpr std::size_t start_pos() const noexcept + { + return start_position; + } + + constexpr std::size_t end_pos() const noexcept + { + return end_position; + } +#endif + ////////////////////////////////////////// // binary serialization/deserialization // ////////////////////////////////////////// @@ -23612,27 +24368,30 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @sa https://json.nlohmann.me/api/basic_json/to_bjdata/ static std::vector to_bjdata(const basic_json& j, const bool use_size = false, - const bool use_type = false) + const bool use_type = false, + const bjdata_version_t version = bjdata_version_t::draft2) { std::vector result; - to_bjdata(j, result, use_size, use_type); + to_bjdata(j, result, use_size, use_type, version); return result; } /// @brief create a BJData serialization of a given JSON value /// @sa https://json.nlohmann.me/api/basic_json/to_bjdata/ static void to_bjdata(const basic_json& j, detail::output_adapter o, - const bool use_size = false, const bool use_type = false) + const bool use_size = false, const bool use_type = false, + const bjdata_version_t version = bjdata_version_t::draft2) { - binary_writer(o).write_ubjson(j, use_size, use_type, true, true); + 
binary_writer(o).write_ubjson(j, use_size, use_type, true, true, version); } /// @brief create a BJData serialization of a given JSON value /// @sa https://json.nlohmann.me/api/basic_json/to_bjdata/ static void to_bjdata(const basic_json& j, detail::output_adapter o, - const bool use_size = false, const bool use_type = false) + const bool use_size = false, const bool use_type = false, + const bjdata_version_t version = bjdata_version_t::draft2) { - binary_writer(o).write_ubjson(j, use_size, use_type, true, true); + binary_writer(o).write_ubjson(j, use_size, use_type, true, true, version); } /// @brief create a BSON serialization of a given JSON value @@ -23668,9 +24427,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23684,9 +24443,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23709,10 +24468,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + detail::json_sax_dom_parser sdp(result, allow_exceptions); // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) - const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); + const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23725,9 +24484,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? 
result : basic_json(value_t::discarded); } @@ -23740,9 +24499,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23763,10 +24522,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + detail::json_sax_dom_parser sdp(result, allow_exceptions); // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) - const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); + const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23779,9 +24538,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23794,9 +24553,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23817,10 +24576,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + detail::json_sax_dom_parser sdp(result, allow_exceptions); // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) - const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); + const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? 
result : basic_json(value_t::discarded); } @@ -23833,9 +24592,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::bjdata).sax_parse(input_format_t::bjdata, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::bjdata).sax_parse(input_format_t::bjdata, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23848,9 +24607,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::bjdata).sax_parse(input_format_t::bjdata, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::bjdata).sax_parse(input_format_t::bjdata, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23863,9 +24622,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23878,9 +24637,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23901,10 +24660,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + detail::json_sax_dom_parser sdp(result, allow_exceptions); // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) - const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); + const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? 
result : basic_json(value_t::discarded); } /// @} @@ -24005,7 +24764,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec // the valid JSON Patch operations enum class patch_operations {add, remove, replace, move, copy, test, invalid}; - const auto get_op = [](const std::string & op) + const auto get_op = [](const string_t& op) { if (op == "add") { @@ -24036,7 +24795,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec }; // wrapper for "add" operation; add value at ptr - const auto operation_add = [&result](json_pointer & ptr, basic_json val) + const auto operation_add = [&result](json_pointer & ptr, const basic_json & val) { // adding to the root of the target document means replacing it if (ptr.empty()) @@ -24142,8 +24901,8 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec for (const auto& val : json_patch) { // wrapper to get a value for an operation - const auto get_value = [&val](const std::string & op, - const std::string & member, + const auto get_value = [&val](const string_t& op, + const string_t& member, bool string_type) -> basic_json & { // find value @@ -24177,8 +24936,8 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec } // collect mandatory members - const auto op = get_value("op", "op", true).template get(); - const auto path = get_value(op, "path", true).template get(); + const auto op = get_value("op", "op", true).template get(); + const auto path = get_value(op, "path", true).template get(); json_pointer ptr(path); switch (get_op(op)) @@ -24204,7 +24963,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec case patch_operations::move: { - const auto from_path = get_value("move", "from", true).template get(); + const auto from_path = get_value("move", "from", true).template get(); json_pointer from_ptr(from_path); // the "from" location must exist - use at() @@ -24221,7 +24980,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec case patch_operations::copy: { - const auto from_path = get_value("copy", "from", true).template get(); + const auto from_path = get_value("copy", "from", true).template get(); const json_pointer from_ptr(from_path); // the "from" location must exist - use at() @@ -24281,7 +25040,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @sa https://json.nlohmann.me/api/basic_json/diff/ JSON_HEDLEY_WARN_UNUSED_RESULT static basic_json diff(const basic_json& source, const basic_json& target, - const std::string& path = "") + const string_t& path = "") { // the patch basic_json result(value_t::array); @@ -24311,7 +25070,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec while (i < source.size() && i < target.size()) { // recursive call to compare array values at index i - auto temp_diff = diff(source[i], target[i], detail::concat(path, '/', std::to_string(i))); + auto temp_diff = diff(source[i], target[i], detail::concat(path, '/', detail::to_string(i))); result.insert(result.end(), temp_diff.begin(), temp_diff.end()); ++i; } @@ -24328,7 +25087,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec result.insert(result.begin() + end_index, object( { {"op", "remove"}, - {"path", detail::concat(path, '/', std::to_string(i))} + {"path", detail::concat(path, '/', detail::to_string(i))} })); ++i; } @@ -24339,7 +25098,7 @@ class basic_json // 
NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
                 result.push_back(
                     {
                         {"op", "add"},
-                        {"path", detail::concat(path, "/-")},
+                        {"path", detail::concat(path, "/-")},
                         {"value", target[i]}
                     });
                 ++i;
@@ -24354,7 +25113,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
             for (auto it = source.cbegin(); it != source.cend(); ++it)
             {
                 // escape the key name to be used in a JSON patch
-                const auto path_key = detail::concat(path, '/', detail::escape(it.key()));
+                const auto path_key = detail::concat(path, '/', detail::escape(it.key()));

                 if (target.find(it.key()) != target.end())
                 {
@@ -24378,7 +25137,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec
                 if (source.find(it.key()) == source.end())
                 {
                     // found a key that is not in this -> add it
-                    const auto path_key = detail::concat(path, '/', detail::escape(it.key()));
+                    const auto path_key = detail::concat(path, '/', detail::escape(it.key()));
                     result.push_back(
                         {
                             {"op", "add"}, {"path", path_key},
@@ -24559,10 +25318,10 @@ inline void swap(nlohmann::NLOHMANN_BASIC_JSON_TPL& j1, nlohmann::NLOHMANN_BASIC
 // #include
 //     __ _____ _____ _____
 //  __|  |   __|     |   | |  JSON for Modern C++
-// |  | |__   |  |  | | | |  version 3.11.3
+// |  | |__   |  |  | | | |  version 3.12.0
 // |_____|_____|_____|_|___|  https://github.com/nlohmann/json
 //
-// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann
 // SPDX-License-Identifier: MIT
@@ -24593,6 +25352,7 @@ inline void swap(nlohmann::NLOHMANN_BASIC_JSON_TPL& j1, nlohmann::NLOHMANN_BASIC
     #undef JSON_HAS_CPP_14
     #undef JSON_HAS_CPP_17
     #undef JSON_HAS_CPP_20
+    #undef JSON_HAS_CPP_23
     #undef JSON_HAS_FILESYSTEM
     #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM
     #undef JSON_HAS_THREE_WAY_COMPARISON
@@ -24604,10 +25364,10 @@ inline void swap(nlohmann::NLOHMANN_BASIC_JSON_TPL& j1, nlohmann::NLOHMANN_BASIC
 // #include
 //     __ _____ _____ _____
 //  __|  |   __|     |   | |  JSON for Modern C++
-// |  | |__   |  |  | | | |  version 3.11.3
+// |  | |__   |  |  | | | |  version 3.12.0
 // |_____|_____|_____|_|___|  https://github.com/nlohmann/json
 //
-// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann
 // SPDX-License-Identifier: MIT
diff --git a/vendor/nlohmann/json_fwd.hpp b/vendor/nlohmann/json_fwd.hpp
new file mode 100644
index 0000000000..9429171390
--- /dev/null
+++ b/vendor/nlohmann/json_fwd.hpp
@@ -0,0 +1,187 @@
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  | |__   |  |  | | | |  version 3.12.0
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+#ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_
+#define INCLUDE_NLOHMANN_JSON_FWD_HPP_
+
+#include <cstdint> // int64_t, uint64_t
+#include <map> // map
+#include <memory> // allocator
+#include <string> // string
+#include <vector> // vector
+
+// #include <nlohmann/detail/abi_macros.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  | |__   |  |  | | | |  version 3.12.0
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+
+
+// This file contains all macro definitions affecting or depending on the ABI
+
+#ifndef JSON_SKIP_LIBRARY_VERSION_CHECK
+    #if defined(NLOHMANN_JSON_VERSION_MAJOR) && defined(NLOHMANN_JSON_VERSION_MINOR) && defined(NLOHMANN_JSON_VERSION_PATCH)
+        #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 12 || NLOHMANN_JSON_VERSION_PATCH != 0
+            #warning "Already included a different version of the library!"
+        #endif
+    #endif
+#endif
+
+#define NLOHMANN_JSON_VERSION_MAJOR 3 // NOLINT(modernize-macro-to-enum)
+#define NLOHMANN_JSON_VERSION_MINOR 12 // NOLINT(modernize-macro-to-enum)
+#define NLOHMANN_JSON_VERSION_PATCH 0 // NOLINT(modernize-macro-to-enum)
+
+#ifndef JSON_DIAGNOSTICS
+    #define JSON_DIAGNOSTICS 0
+#endif
+
+#ifndef JSON_DIAGNOSTIC_POSITIONS
+    #define JSON_DIAGNOSTIC_POSITIONS 0
+#endif
+
+#ifndef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON
+    #define JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON 0
+#endif
+
+#if JSON_DIAGNOSTICS
+    #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS _diag
+#else
+    #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS
+#endif
+
+#if JSON_DIAGNOSTIC_POSITIONS
+    #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS _dp
+#else
+    #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS
+#endif
+
+#if JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON
+    #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON _ldvcmp
+#else
+    #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON
+#endif
+
+#ifndef NLOHMANN_JSON_NAMESPACE_NO_VERSION
+    #define NLOHMANN_JSON_NAMESPACE_NO_VERSION 0
+#endif
+
+// Construct the namespace ABI tags component
+#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b, c) json_abi ## a ## b ## c
+#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b, c) \
+    NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b, c)
+
+#define NLOHMANN_JSON_ABI_TAGS \
+    NLOHMANN_JSON_ABI_TAGS_CONCAT( \
+        NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS, \
+        NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON, \
+        NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS)
+
+// Construct the namespace version component
+#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) \
+    _v ## major ## _ ## minor ## _ ## patch
+#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(major, minor, patch) \
+    NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch)
+
+#if NLOHMANN_JSON_NAMESPACE_NO_VERSION
+#define NLOHMANN_JSON_NAMESPACE_VERSION
+#else
+#define NLOHMANN_JSON_NAMESPACE_VERSION \
+    NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(NLOHMANN_JSON_VERSION_MAJOR, \
+                                           NLOHMANN_JSON_VERSION_MINOR, \
+                                           NLOHMANN_JSON_VERSION_PATCH)
+#endif
+
+// Combine namespace components
+#define NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) a ## b
+#define NLOHMANN_JSON_NAMESPACE_CONCAT(a, b) \
+    NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b)
+
+#ifndef NLOHMANN_JSON_NAMESPACE
+#define NLOHMANN_JSON_NAMESPACE \
+    nlohmann::NLOHMANN_JSON_NAMESPACE_CONCAT( \
+        NLOHMANN_JSON_ABI_TAGS, \
+        NLOHMANN_JSON_NAMESPACE_VERSION)
+#endif
+
+#ifndef NLOHMANN_JSON_NAMESPACE_BEGIN
+#define NLOHMANN_JSON_NAMESPACE_BEGIN \
+    namespace nlohmann \
+    { \
+    inline namespace NLOHMANN_JSON_NAMESPACE_CONCAT( \
+        NLOHMANN_JSON_ABI_TAGS, \
+        NLOHMANN_JSON_NAMESPACE_VERSION) \
+    {
#endif
+
+#ifndef NLOHMANN_JSON_NAMESPACE_END
+#define NLOHMANN_JSON_NAMESPACE_END \
+    }  /* namespace (inline namespace) NOLINT(readability/namespace) */ \
+    }  // namespace nlohmann
+#endif
+
+
+/*!
+@brief namespace for Niels Lohmann
+@see https://github.com/nlohmann
+@since version 1.0.0
+*/
+NLOHMANN_JSON_NAMESPACE_BEGIN
+
+/*!
+@brief default JSONSerializer template argument
+
+This serializer ignores the template arguments and uses ADL
+([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl))
+for serialization.
+*/
+template<typename T = void, typename SFINAE = void>
+struct adl_serializer;
+
+/// a class to store JSON values
+/// @sa https://json.nlohmann.me/api/basic_json/
+template<template<typename U, typename V, typename... Args> class ObjectType =
+    std::map,
+    template<typename U, typename... Args> class ArrayType = std::vector,
+    class StringType = std::string, class BooleanType = bool,
+    class NumberIntegerType = std::int64_t,
+    class NumberUnsignedType = std::uint64_t,
+    class NumberFloatType = double,
+    template<typename U> class AllocatorType = std::allocator,
+    template<typename T, typename SFINAE = void> class JSONSerializer =
+    adl_serializer,
+    class BinaryType = std::vector<std::uint8_t>, // cppcheck-suppress syntaxError
+    class CustomBaseClass = void>
+class basic_json;
+
+/// @brief JSON Pointer defines a string syntax for identifying a specific value within a JSON document
+/// @sa https://json.nlohmann.me/api/json_pointer/
+template<typename RefStringType>
+class json_pointer;
+
+/*!
+@brief default specialization
+@sa https://json.nlohmann.me/api/json/
+*/
+using json = basic_json<>;
+
+/// @brief a minimal map-like container that preserves insertion order
+/// @sa https://json.nlohmann.me/api/ordered_map/
+template<class Key, class T, class IgnoredLess, class Allocator>
+struct ordered_map;
+
+/// @brief specialization that maintains the insertion order of object keys
+/// @sa https://json.nlohmann.me/api/ordered_json/
+using ordered_json = basic_json<nlohmann::ordered_map>;
+
+NLOHMANN_JSON_NAMESPACE_END
+
+#endif  // INCLUDE_NLOHMANN_JSON_FWD_HPP_
diff --git a/common/stb_image.h b/vendor/stb/stb_image.h
similarity index 100%
rename from common/stb_image.h
rename to vendor/stb/stb_image.h
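As a quick sanity check of the user-visible upstream changes this vendoring pulls in, the sketch below exercises the new BSON uint64 encoding (type 0x11 instead of an out_of_range.407 throw), the BJData draft-3 'B' (byte) marker, and the opt-in JSON_DIAGNOSTIC_POSITIONS byte offsets. It is illustrative only, not part of the change set: the file name is hypothetical, the include path assumes the ../vendor include directory added in common/CMakeLists.txt, and the round-trip details should be confirmed against the upstream 3.12.0 documentation.

    // smoke_test_json_3_12.cpp - hypothetical reviewer aid, not part of this PR
    #define JSON_DIAGNOSTIC_POSITIONS 1 // opt-in; note this adds the _dp ABI tag to the inline namespace
    #include <nlohmann/json.hpp>

    #include <cassert>
    #include <cstdint>
    #include <string>

    int main()
    {
        using nlohmann::json;

        // 1) BSON: unsigned values above INT64_MAX are now written as type 0x11
        //    (uint64) instead of throwing out_of_range (error 407).
        const json big = {{"n", static_cast<std::uint64_t>(9223372036854775808ULL)}};
        const auto bson = json::to_bson(big);
        (void)bson;

        // 2) BJData: the draft-3 encoding uses the new 'B' (byte) marker for
        //    binary values; the reader decodes it back to a binary value.
        const json bin = json::binary({0xDE, 0xAD, 0xBE, 0xEF});
        const auto bjd = json::to_bjdata(bin, /*use_size=*/true, /*use_type=*/true,
                                         json::bjdata_version_t::draft3);
        assert(json::from_bjdata(bjd).is_binary());

        // 3) Diagnostic positions: parsed values remember their byte offsets
        //    in the input buffer (std::string::npos when unavailable).
        const json doc = json::parse(R"({"key": [1, 2, 3]})");
        assert(doc["key"].start_pos() != std::string::npos);
        assert(doc["key"].end_pos() > doc["key"].start_pos());

        return 0;
    }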