Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-27)
* add grok-2 support
* type fix
* type fix
* type fix
* "fix" vocab for invalid sequences
* fix expert tensor mapping and spaces in vocab
* add chat template
* fix norm tensor mapping
* rename layer_out_norm to ffn_post_norm
* ensure ffn_post_norm is mapped
* fix experts merging
* remove erroneous FFN_GATE entry
* concatenate split tensors and add more metadata
* process all expert layers and try cat instead of hstack
* add support for community BPE vocab
* fix expert feed forward length and ffn_down concat
* commit this too
* add ffn_up/gate/down, unsure if sequence is right
* add ffn_gate/down/up to tensor names
* correct residual moe (still not working)
* mess--
* fix embedding scale being applied twice
* add built in chat template
* change beta fast for grok if default value
* remove spm vocab in favor of community bpe vocab
* change attention temp length metadata type to integer
* update attention temp length metadata
* remove comment
* replace M_SQRT2 with std::sqrt(2)
* add yarn metadata, move defaults to hparams
67 lines
1.9 KiB
C++
#pragma once

#include <string>
#include <vector>
#include <cstdint>

enum llm_chat_template {
    LLM_CHAT_TEMPLATE_CHATML,
    LLM_CHAT_TEMPLATE_LLAMA_2,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP,
    LLM_CHAT_TEMPLATE_MISTRAL_V1,
    LLM_CHAT_TEMPLATE_MISTRAL_V3,
    LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN,
    LLM_CHAT_TEMPLATE_MISTRAL_V7,
    LLM_CHAT_TEMPLATE_MISTRAL_V7_TEKKEN,
    LLM_CHAT_TEMPLATE_PHI_3,
    LLM_CHAT_TEMPLATE_PHI_4,
    LLM_CHAT_TEMPLATE_FALCON_3,
    LLM_CHAT_TEMPLATE_ZEPHYR,
    LLM_CHAT_TEMPLATE_MONARCH,
    LLM_CHAT_TEMPLATE_GEMMA,
    LLM_CHAT_TEMPLATE_ORION,
    LLM_CHAT_TEMPLATE_OPENCHAT,
    LLM_CHAT_TEMPLATE_VICUNA,
    LLM_CHAT_TEMPLATE_VICUNA_ORCA,
    LLM_CHAT_TEMPLATE_DEEPSEEK,
    LLM_CHAT_TEMPLATE_DEEPSEEK_2,
    LLM_CHAT_TEMPLATE_DEEPSEEK_3,
    LLM_CHAT_TEMPLATE_COMMAND_R,
    LLM_CHAT_TEMPLATE_LLAMA_3,
    LLM_CHAT_TEMPLATE_CHATGLM_3,
    LLM_CHAT_TEMPLATE_CHATGLM_4,
    LLM_CHAT_TEMPLATE_GLMEDGE,
    LLM_CHAT_TEMPLATE_MINICPM,
    LLM_CHAT_TEMPLATE_EXAONE_3,
    LLM_CHAT_TEMPLATE_EXAONE_4,
    LLM_CHAT_TEMPLATE_RWKV_WORLD,
    LLM_CHAT_TEMPLATE_GRANITE,
    LLM_CHAT_TEMPLATE_GIGACHAT,
    LLM_CHAT_TEMPLATE_MEGREZ,
    LLM_CHAT_TEMPLATE_YANDEX,
    LLM_CHAT_TEMPLATE_BAILING,
    LLM_CHAT_TEMPLATE_LLAMA4,
    LLM_CHAT_TEMPLATE_SMOLVLM,
    LLM_CHAT_TEMPLATE_DOTS1,
    LLM_CHAT_TEMPLATE_HUNYUAN_MOE,
    LLM_CHAT_TEMPLATE_OPENAI_MOE,
    LLM_CHAT_TEMPLATE_HUNYUAN_DENSE,
    LLM_CHAT_TEMPLATE_KIMI_K2,
    LLM_CHAT_TEMPLATE_SEED_OSS,
    LLM_CHAT_TEMPLATE_GROK_2,
    LLM_CHAT_TEMPLATE_UNKNOWN,
};

struct llama_chat_message;

llm_chat_template llm_chat_template_from_str(const std::string & name);

llm_chat_template llm_chat_detect_template(const std::string & tmpl);

int32_t llm_chat_apply_template(
    llm_chat_template tmpl,
    const std::vector<const llama_chat_message *> & chat,
    std::string & dest, bool add_ass);
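Not part of the header itself, but a minimal usage sketch of the declarations above, assuming a build inside the llama.cpp source tree (the llama_chat_message struct with role/content fields comes from llama.h; the include paths and the exact return-value convention shown in the comments are assumptions):

// Sketch only: assumes "llama.h" provides llama_chat_message { role, content }
// and "llama-chat.h" is the internal header shown above.
#include <cstdio>
#include <string>
#include <vector>

#include "llama.h"
#include "llama-chat.h"

int main() {
    // Conversation to render; role/content are plain C strings.
    llama_chat_message msgs[] = {
        { "system", "You are a helpful assistant." },
        { "user",   "Hello!" },
    };

    std::vector<const llama_chat_message *> chat;
    for (const auto & m : msgs) {
        chat.push_back(&m);
    }

    // Look up a built-in template by its registered name ("chatml" here).
    llm_chat_template tmpl = llm_chat_template_from_str("chatml");

    // Render the messages into `prompt`; add_ass = true asks for the trailing
    // assistant prompt prefix. A negative result is assumed to signal failure.
    std::string prompt;
    int32_t res = llm_chat_apply_template(tmpl, chat, prompt, /*add_ass=*/true);
    if (res < 0) {
        std::fprintf(stderr, "failed to apply chat template\n");
        return 1;
    }

    std::printf("%s\n", prompt.c_str());
    return 0;
}

When only the raw Jinja template string from a model's metadata is available, llm_chat_detect_template(tmpl_str) can be used instead of llm_chat_template_from_str to guess the matching built-in template, with LLM_CHAT_TEMPLATE_UNKNOWN as the fallback for unrecognized templates.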