convert : converting mmproj for Qwen2/2.5VL from convert_hf_to_gguf (#13209)
* wip
* qwen2.5vl ok
* vision: fix models missing "text_config"
* add test
* fix test repo name
* fix 32B model
* Revert "fix 32B model"
This reverts commit 651752f1ae.
* clarify about 32B
* rm qwen surgery script
* update llava/readme
* move V_ENC_EMBD_PATCH handling to Qwen2VLVisionModel
gguf-py/gguf/gguf_writer.py
@@ -984,6 +984,9 @@ class GGUFWriter:
     def add_vision_projector_scale_factor(self, value: int) -> None:
         self.add_uint32(Keys.ClipVision.Projector.SCALE_FACTOR, value)
 
+    def add_vision_n_wa_pattern(self, value: int) -> None:
+        self.add_uint32(Keys.ClipVision.N_WA_PATTERN, value)
+
     def _pack(self, fmt: str, value: Any, skip_pack_prefix: bool = False) -> bytes:
         pack_prefix = ''
         if not skip_pack_prefix:
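The writer helpers touched above are what a conversion script would call to record the Qwen2/2.5VL vision metadata in the mmproj GGUF. Below is a minimal, hypothetical sketch of such a call sequence; the output filename, the "clip" architecture string, and the concrete values passed (scale factor 2, window-attention pattern 8) are illustrative assumptions, not values taken from this commit.

    import gguf

    # Hypothetical sketch: record Qwen2.5VL vision metadata with the writer helpers above.
    writer = gguf.GGUFWriter("mmproj-qwen2.5vl.gguf", arch="clip")  # assumed output name and arch string
    writer.add_vision_projector_scale_factor(2)   # assumed value, for illustration only
    writer.add_vision_n_wa_pattern(8)             # assumed Qwen2.5VL window-attention pattern
    writer.write_header_to_file()
    writer.write_kv_data_to_file()
    writer.write_tensors_to_file()                # no tensors added in this sketch
    writer.close()

In the real converter these values come from the Hugging Face model config; the sketch only shows where the new add_vision_n_wa_pattern hook fits alongside the existing vision keys.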