mirror of https://github.com/ggml-org/llama.cpp.git
model : add hunyuan dense (#14878)
* support hunyuan_v1_dense
Signed-off-by: stevenkuang <stevenkuang@tencent.com>
* update hunyuan_moe to hunyuan_v1_moe
Signed-off-by: stevenkuang <stevenkuang@tencent.com>
* fix rope alpha assert and bos token
Signed-off-by: stevenkuang <stevenkuang@tencent.com>
* add blank line
Signed-off-by: stevenkuang <stevenkuang@tencent.com>
* Revert "update hunyuan_moe to hunyuan_v1_moe"
This reverts commit aa973ca219.
* use hunyuan_dense instead of hunyuan_v1_dense
Signed-off-by: stevenkuang <stevenkuang@tencent.com>
* fix hunyuan_moe chat template
Signed-off-by: stevenkuang <stevenkuang@tencent.com>
* remove leftover code
Signed-off-by: stevenkuang <stevenkuang@tencent.com>
* update hunyuan dense chat template
Signed-off-by: stevenkuang <stevenkuang@tencent.com>
* fix hunyuan dense vocab and chat template
Signed-off-by: stevenkuang <stevenkuang@tencent.com>
---------
Signed-off-by: stevenkuang <stevenkuang@tencent.com>
@@ -307,6 +307,7 @@ struct llm_tokenizer_bpe : llm_tokenizer {
                 };
                 break;
             case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM:
+            case LLAMA_VOCAB_PRE_TYPE_HUNYUAN_DENSE:
                 regex_exprs = {
                     "\\p{N}{1,3}",
                     "[一-龥぀-ゟ゠-ヿ]+",
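The first pattern in that list, "\p{N}{1,3}", splits long digit runs into chunks of at most three characters before BPE merging; the new HUNYUAN_DENSE case simply reuses the DEEPSEEK3_LLM expressions. Below is a minimal standalone sketch of that digit-splitting behaviour. It is not llama.cpp code: std::regex has no \p{N} support, so the sketch approximates the rule with the ASCII class [0-9].

// Sketch only: approximates the "\p{N}{1,3}" pre-tokenizer rule with [0-9].
#include <iostream>
#include <regex>
#include <string>

int main() {
    const std::regex digit_chunks("[0-9]{1,3}");
    const std::string input = "pi is 3141592653";

    // Walk the matches left to right; a 10-digit run becomes 3+3+3+1 chunks.
    for (std::sregex_iterator it(input.begin(), input.end(), digit_chunks), end;
         it != end; ++it) {
        std::cout << it->str() << "\n"; // prints: 314, 159, 265, 3
    }
    return 0;
}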
@@ -1964,6 +1965,10 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                 tokenizer_pre == "hunyuan") {
                 pre_type = LLAMA_VOCAB_PRE_TYPE_HUNYUAN;
                 clean_spaces = false;
+            } else if (
+                tokenizer_pre == "hunyuan-dense") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_HUNYUAN_DENSE;
+                clean_spaces = false;
             } else if (
                 tokenizer_pre == "kimi-k2") {
                 pre_type = LLAMA_VOCAB_PRE_TYPE_KIMI_K2;
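The second hunk wires the new pre-tokenizer into vocabulary loading: the "tokenizer.ggml.pre" string read from the GGUF metadata ("hunyuan-dense") now selects LLAMA_VOCAB_PRE_TYPE_HUNYUAN_DENSE. The sketch below restates that mapping in isolation; the _SKETCH names and the helper function are illustrative stand-ins, not llama.cpp API.

// Sketch only: the string-to-enum selection added by this commit, in isolation.
#include <iostream>
#include <stdexcept>
#include <string>

enum pre_type_sketch {
    PRE_HUNYUAN,        // stand-in for LLAMA_VOCAB_PRE_TYPE_HUNYUAN
    PRE_HUNYUAN_DENSE,  // stand-in for LLAMA_VOCAB_PRE_TYPE_HUNYUAN_DENSE (new)
    PRE_KIMI_K2,        // stand-in for LLAMA_VOCAB_PRE_TYPE_KIMI_K2
};

pre_type_sketch pre_type_from_string(const std::string & tokenizer_pre) {
    if (tokenizer_pre == "hunyuan")       { return PRE_HUNYUAN; }
    if (tokenizer_pre == "hunyuan-dense") { return PRE_HUNYUAN_DENSE; } // added here
    if (tokenizer_pre == "kimi-k2")       { return PRE_KIMI_K2; }
    throw std::runtime_error("unknown pre-tokenizer: " + tokenizer_pre);
}

int main() {
    std::cout << pre_type_from_string("hunyuan-dense") << "\n"; // prints 1
    return 0;
}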