mtmd : support Kimi VL model (#15458)
* convert : fix tensor naming conflict for llama 4 vision
* convert ok
* support kimi vision model
* clean up
* fix style
* fix calc number of output tokens
* refactor resize_position_embeddings
* add test case
* rename build fn
* correct a small bug
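The `resize_position_embeddings` item in the list above refers to adapting a vision encoder's learned position embeddings when the patch grid size changes. The snippet below is a minimal, illustrative sketch of that general technique (2D bicubic interpolation over a square grid); the function name, tensor layout, and use of PyTorch are assumptions for illustration and not the actual llama.cpp or Kimi VL implementation.

```python
# Illustrative sketch only: resize a ViT-style learned position embedding to a
# new grid via bicubic interpolation. Shapes and names are assumed, not taken
# from the real converter or mtmd code.
import torch
import torch.nn.functional as F

def resize_position_embeddings(pos_emb: torch.Tensor, new_h: int, new_w: int) -> torch.Tensor:
    """pos_emb: [old_h * old_w, dim], laid out row-major on a square grid."""
    num_patches, dim = pos_emb.shape
    old_side = int(num_patches ** 0.5)
    assert old_side * old_side == num_patches, "expected a square patch grid"
    # [N, dim] -> [1, dim, old_side, old_side] so interpolate treats dim as channels
    grid = pos_emb.reshape(old_side, old_side, dim).permute(2, 0, 1).unsqueeze(0)
    grid = F.interpolate(grid, size=(new_h, new_w), mode="bicubic", align_corners=False)
    # back to [new_h * new_w, dim]
    return grid.squeeze(0).permute(1, 2, 0).reshape(new_h * new_w, dim)
```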
@@ -2850,6 +2850,7 @@ class VisionProjectorType:
     QWEN25O = "qwen2.5o"  # omni
     VOXTRAL = "voxtral"
     LFM2 = "lfm2"
+    KIMIVL = "kimivl"


 # Items here are (block size, type size)
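The hunk above only adds the new `KIMIVL` projector-type string. As a rough illustration of how such a constant might be selected during conversion, the sketch below maps a Hugging Face architecture name to the projector-type string; the mapping, the architecture name, and the helper function are hypothetical and not the converter's actual code.

```python
# Hypothetical sketch: choosing a GGUF projector-type string from an HF
# architecture name. The architecture key and helper name are assumptions.
KIMIVL = "kimivl"  # mirrors the constant added in the diff above

_ARCH_TO_PROJECTOR = {
    "KimiVLForConditionalGeneration": KIMIVL,  # assumed HF architecture name
}

def projector_type_for(hf_architecture: str) -> str:
    """Return the projector-type string for a given HF vision architecture."""
    try:
        return _ARCH_TO_PROJECTOR[hf_architecture]
    except KeyError:
        raise ValueError(f"unsupported vision architecture: {hf_architecture}")
```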