mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-01)
* First draft
* Fix linter errors
* Added missing sinks nullptr
* Don't forget the llama-arch!
* We're through to the generation stage.
* Fix post-attention norm
* Apply suggestions from code review
* Fix RoPE type
* Fix tensor name and reorder llm_types
* Update gguf-py/gguf/constants.py: remove nonexistent FFN_POST_NORM tensor
* Update src/llama-model.h
* Add basic chat template
* Add chat template tests
* Remake chat template test
* Apply suggestions from code review
* Update src/llama-chat.cpp
* Reorder llm type descriptions
* Update src/llama-model.cpp

Co-authored-by: Sigbjørn Skjæret <sigbjorn.skjaeret@scala.com>
66 lines · 1.9 KiB · C++
#pragma once

#include <string>
#include <vector>
#include <cstdint>

enum llm_chat_template {
    LLM_CHAT_TEMPLATE_CHATML,
    LLM_CHAT_TEMPLATE_LLAMA_2,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP,
    LLM_CHAT_TEMPLATE_MISTRAL_V1,
    LLM_CHAT_TEMPLATE_MISTRAL_V3,
    LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN,
    LLM_CHAT_TEMPLATE_MISTRAL_V7,
    LLM_CHAT_TEMPLATE_MISTRAL_V7_TEKKEN,
    LLM_CHAT_TEMPLATE_PHI_3,
    LLM_CHAT_TEMPLATE_PHI_4,
    LLM_CHAT_TEMPLATE_FALCON_3,
    LLM_CHAT_TEMPLATE_ZEPHYR,
    LLM_CHAT_TEMPLATE_MONARCH,
    LLM_CHAT_TEMPLATE_GEMMA,
    LLM_CHAT_TEMPLATE_ORION,
    LLM_CHAT_TEMPLATE_OPENCHAT,
    LLM_CHAT_TEMPLATE_VICUNA,
    LLM_CHAT_TEMPLATE_VICUNA_ORCA,
    LLM_CHAT_TEMPLATE_DEEPSEEK,
    LLM_CHAT_TEMPLATE_DEEPSEEK_2,
    LLM_CHAT_TEMPLATE_DEEPSEEK_3,
    LLM_CHAT_TEMPLATE_COMMAND_R,
    LLM_CHAT_TEMPLATE_LLAMA_3,
    LLM_CHAT_TEMPLATE_CHATGLM_3,
    LLM_CHAT_TEMPLATE_CHATGLM_4,
    LLM_CHAT_TEMPLATE_GLMEDGE,
    LLM_CHAT_TEMPLATE_MINICPM,
    LLM_CHAT_TEMPLATE_EXAONE_3,
    LLM_CHAT_TEMPLATE_EXAONE_4,
    LLM_CHAT_TEMPLATE_RWKV_WORLD,
    LLM_CHAT_TEMPLATE_GRANITE,
    LLM_CHAT_TEMPLATE_GIGACHAT,
    LLM_CHAT_TEMPLATE_MEGREZ,
    LLM_CHAT_TEMPLATE_YANDEX,
    LLM_CHAT_TEMPLATE_BAILING,
    LLM_CHAT_TEMPLATE_LLAMA4,
    LLM_CHAT_TEMPLATE_SMOLVLM,
    LLM_CHAT_TEMPLATE_DOTS1,
    LLM_CHAT_TEMPLATE_HUNYUAN_MOE,
    LLM_CHAT_TEMPLATE_OPENAI_MOE,
    LLM_CHAT_TEMPLATE_HUNYUAN_DENSE,
    LLM_CHAT_TEMPLATE_KIMI_K2,
    LLM_CHAT_TEMPLATE_SEED_OSS,
    LLM_CHAT_TEMPLATE_UNKNOWN,
};

struct llama_chat_message;

// map a template name (e.g. "chatml") to its enum value
llm_chat_template llm_chat_template_from_str(const std::string & name);

// guess the template family from the contents of a Jinja template string
llm_chat_template llm_chat_detect_template(const std::string & tmpl);

// render the chat messages into dest using the given template;
// add_ass appends the assistant prompt so generation can continue the turn
int32_t llm_chat_apply_template(
    llm_chat_template tmpl,
    const std::vector<const llama_chat_message *> & chat,
    std::string & dest, bool add_ass);
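For orientation, a minimal usage sketch of this internal API follows. It assumes the llama_chat_message definition from llama.h (a pair of role/content C strings), that "chatml" is a recognized template name, and a translation unit built inside the llama.cpp source tree where this header is visible; the error-handling convention shown is an assumption, not a documented contract.

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

#include "llama.h"       // llama_chat_message: { const char * role; const char * content; }
#include "llama-chat.h"  // the header shown above (internal, not installed)

int main() {
    // the caller owns the message storage; the API takes non-owning pointers
    std::vector<llama_chat_message> msgs = {
        { "system", "You are a helpful assistant." },
        { "user",   "Hello!"                       },
    };

    std::vector<const llama_chat_message *> chat;
    for (const auto & m : msgs) {
        chat.push_back(&m);
    }

    // resolve the template by name ("chatml" maps to LLM_CHAT_TEMPLATE_CHATML),
    // then render the conversation; add_ass = true opens an assistant turn
    // so that generation can continue from the end of the prompt
    llm_chat_template tmpl = llm_chat_template_from_str("chatml");

    std::string prompt;
    int32_t res = llm_chat_apply_template(tmpl, chat, prompt, /*add_ass=*/true);
    if (res < 0) {
        // negative-on-failure is assumed here; see llama-chat.cpp for details
        fprintf(stderr, "failed to apply chat template\n");
        return 1;
    }
    printf("%s\n", prompt.c_str());

    // the detector can instead guess the family from a Jinja template string;
    // a fragment mentioning "<|im_start|>" is recognized as ChatML
    llm_chat_template detected = llm_chat_detect_template("{{ '<|im_start|>' }}");
    (void) detected;

    return 0;
}

With the ChatML template the rendered prompt wraps each message in <|im_start|>role ... <|im_end|> markers, ending with an opened assistant turn when add_ass is set.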