Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-27 08:21:30 +00:00)
* add BailingMoeV2 support
* update llm types
* undo
* undo
* update llm types
* add model collection link
* update
* almost working
* correct group selection and rename n_group_exp
* avoid large top_k and use argmax instead for now (see the sketch after this list)

  if we had something like argmax2 that would be equivalent, but this works fine until then
* poke
* skip group selection when there are no tokens
* fix 1T conversion
* hopefully fixed expert group selection

  third time's the charm?
* make expert group selection generally available

  The new LLaDA2Moe model uses this method too, make it generally available regardless of architecture.
* allow n_expert_groups to be 1 (Kimi K2)
* address review suggestions
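The top_k-vs-argmax point above is worth making concrete. Below is a minimal standalone C++ sketch of the idea, not the actual ggml graph code: the helper name, the tensor-free data layout, and the group scoring rule (sum of each group's two strongest expert logits, in the style of DeepSeek-V3 grouped routing) are assumptions for illustration. What it demonstrates is that when only a handful of groups are kept, running argmax n_group_used times and masking each winner avoids a full top_k sort; a hypothetical argmax2 op would collapse the loop into a single step.

    #include <algorithm>
    #include <limits>
    #include <vector>

    // Hypothetical helper: pick the indices of the n_group_used best expert
    // groups by repeated argmax over per-group scores, masking out each
    // winner, instead of a full top_k sort over all groups.
    static std::vector<int> select_expert_groups(
            const std::vector<float> & expert_scores, // n_group * experts_per_group router logits
            int n_group,
            int n_group_used) {
        // degenerate case (e.g. Kimi K2 with n_expert_groups == 1):
        // there is nothing to select, keep the single group
        if (n_group <= 1) {
            return { 0 };
        }

        const int experts_per_group = (int) expert_scores.size() / n_group;

        // score each group by the sum of its two strongest expert logits
        // (assumed scoring rule, for illustration only)
        std::vector<float> group_scores(n_group);
        for (int g = 0; g < n_group; ++g) {
            float best1 = -std::numeric_limits<float>::infinity();
            float best2 = best1;
            for (int e = 0; e < experts_per_group; ++e) {
                const float s = expert_scores[g*experts_per_group + e];
                if (s > best1) { best2 = best1; best1 = s; }
                else if (s > best2) { best2 = s; }
            }
            group_scores[g] = best1 + best2;
        }

        // repeated argmax: cheaper than top_k when n_group_used is small
        std::vector<int> selected;
        for (int k = 0; k < n_group_used; ++k) {
            const auto it = std::max_element(group_scores.begin(), group_scores.end());
            selected.push_back((int) (it - group_scores.begin()));
            *it = -std::numeric_limits<float>::infinity(); // mask the winner
        }
        return selected;
    }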
69 lines · 2.0 KiB · C++
#pragma once

#include <string>
#include <vector>
#include <cstdint>

enum llm_chat_template {
    LLM_CHAT_TEMPLATE_CHATML,
    LLM_CHAT_TEMPLATE_LLAMA_2,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP,
    LLM_CHAT_TEMPLATE_MISTRAL_V1,
    LLM_CHAT_TEMPLATE_MISTRAL_V3,
    LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN,
    LLM_CHAT_TEMPLATE_MISTRAL_V7,
    LLM_CHAT_TEMPLATE_MISTRAL_V7_TEKKEN,
    LLM_CHAT_TEMPLATE_PHI_3,
    LLM_CHAT_TEMPLATE_PHI_4,
    LLM_CHAT_TEMPLATE_FALCON_3,
    LLM_CHAT_TEMPLATE_ZEPHYR,
    LLM_CHAT_TEMPLATE_MONARCH,
    LLM_CHAT_TEMPLATE_GEMMA,
    LLM_CHAT_TEMPLATE_ORION,
    LLM_CHAT_TEMPLATE_OPENCHAT,
    LLM_CHAT_TEMPLATE_VICUNA,
    LLM_CHAT_TEMPLATE_VICUNA_ORCA,
    LLM_CHAT_TEMPLATE_DEEPSEEK,
    LLM_CHAT_TEMPLATE_DEEPSEEK_2,
    LLM_CHAT_TEMPLATE_DEEPSEEK_3,
    LLM_CHAT_TEMPLATE_COMMAND_R,
    LLM_CHAT_TEMPLATE_LLAMA_3,
    LLM_CHAT_TEMPLATE_CHATGLM_3,
    LLM_CHAT_TEMPLATE_CHATGLM_4,
    LLM_CHAT_TEMPLATE_GLMEDGE,
    LLM_CHAT_TEMPLATE_MINICPM,
    LLM_CHAT_TEMPLATE_EXAONE_3,
    LLM_CHAT_TEMPLATE_EXAONE_4,
    LLM_CHAT_TEMPLATE_RWKV_WORLD,
    LLM_CHAT_TEMPLATE_GRANITE,
    LLM_CHAT_TEMPLATE_GIGACHAT,
    LLM_CHAT_TEMPLATE_MEGREZ,
    LLM_CHAT_TEMPLATE_YANDEX,
    LLM_CHAT_TEMPLATE_BAILING,
    LLM_CHAT_TEMPLATE_BAILING_THINK,
    LLM_CHAT_TEMPLATE_BAILING2,
    LLM_CHAT_TEMPLATE_LLAMA4,
    LLM_CHAT_TEMPLATE_SMOLVLM,
    LLM_CHAT_TEMPLATE_DOTS1,
    LLM_CHAT_TEMPLATE_HUNYUAN_MOE,
    LLM_CHAT_TEMPLATE_OPENAI_MOE,
    LLM_CHAT_TEMPLATE_HUNYUAN_DENSE,
    LLM_CHAT_TEMPLATE_KIMI_K2,
    LLM_CHAT_TEMPLATE_SEED_OSS,
    LLM_CHAT_TEMPLATE_GROK_2,
    LLM_CHAT_TEMPLATE_UNKNOWN,
};

struct llama_chat_message;

llm_chat_template llm_chat_template_from_str(const std::string & name);

llm_chat_template llm_chat_detect_template(const std::string & tmpl);

int32_t llm_chat_apply_template(
    llm_chat_template tmpl,
    const std::vector<const llama_chat_message *> & chat,
    std::string & dest, bool add_ass);
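Since the header above only carries declarations, here is a hedged usage sketch of how its entry points fit together. It assumes llama_chat_message is the two-field struct from the public llama.h (role and content as C strings) and that a negative return from llm_chat_apply_template signals an unsupported template; the Jinja snippet and the messages are placeholders, not real template source.

    #include <cstdio>
    #include <string>
    #include <vector>

    #include "llama.h"      // definition of llama_chat_message
    #include "llama-chat.h" // the header shown above (internal to src/)

    int main() {
        // map a raw Jinja template (e.g. a GGUF's tokenizer.chat_template)
        // onto one of the known llm_chat_template values
        const std::string jinja = "{%- for message in messages -%}..."; // placeholder
        llm_chat_template tmpl = llm_chat_detect_template(jinja);
        if (tmpl == LLM_CHAT_TEMPLATE_UNKNOWN) {
            tmpl = LLM_CHAT_TEMPLATE_CHATML; // fall back to ChatML
        }

        const llama_chat_message msgs[] = {
            { "system", "You are a helpful assistant." },
            { "user",   "Hello!"                       },
        };
        std::vector<const llama_chat_message *> chat = { &msgs[0], &msgs[1] };

        // render the conversation into a prompt string; add_ass appends the
        // assistant turn prefix so generation continues as the assistant
        std::string prompt;
        const int32_t n = llm_chat_apply_template(tmpl, chat, prompt, /*add_ass=*/true);
        if (n < 0) {
            fprintf(stderr, "template not supported\n"); // assumed error convention
            return 1;
        }
        printf("%s\n", prompt.c_str());
        return 0;
    }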