mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-30 08:42:00 +00:00)

commit 92ecdcc06a
* wip llama 4 conversion
* rm redundant __init__
* fix conversion
* fix conversion
* test impl
* try this
* reshape patch_embeddings_0
* fix view
* rm ffn_post_norm
* cgraph ok
* f32 for pos embd
* add image marker tokens
* Llama4UnfoldConvolution
* correct pixel shuffle
* fix merge conflicts
* correct
* add debug_graph
* logits matched, but it still perceives the image incorrectly
* fix style
* add image_grid_pinpoints
* handle llama 4 preprocessing
* rm load_image_size
* rm unused line
* fix
* small fix 2
* add test & docs
* fix llava-1.6 test
* test: add notion of huge models
* add comment
* add warn about degraded quality

96 lines, 4.2 KiB, C

#pragma once

#include "ggml.h"
#include <stddef.h>
#include <stdint.h>

struct clip_ctx;

struct clip_image_size {
    int width;
    int height;
};

struct clip_image_f32;
struct clip_image_u8_batch;
struct clip_image_f32_batch;

struct clip_context_params {
    bool use_gpu;
    enum ggml_log_level verbosity;
};

struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_params);

void clip_free(struct clip_ctx * ctx);
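
// Illustrative usage sketch (not part of the API surface below): loading a
// projector file and releasing the context. The path "mmproj.gguf" and the
// verbosity level are placeholders chosen for the example.
//
//     struct clip_context_params params = {
//         /* use_gpu   = */ true,
//         /* verbosity = */ GGML_LOG_LEVEL_INFO,
//     };
//     struct clip_ctx * ctx = clip_init("mmproj.gguf", params);
//     if (ctx == NULL) {
//         // model could not be loaded
//     }
//     // ... use ctx ...
//     clip_free(ctx);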

size_t clip_embd_nbytes(const struct clip_ctx * ctx);
size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_w, int img_h);

int32_t clip_get_image_size (const struct clip_ctx * ctx);
int32_t clip_get_patch_size (const struct clip_ctx * ctx);
int32_t clip_get_hidden_size(const struct clip_ctx * ctx);
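
// Sketch only: for a typical square ViT input, the patch grid can be derived
// from the two getters above (the example values are assumptions); whether the
// model later merges or pools patches is model-specific.
//
//     const int image_size = clip_get_image_size(ctx);   // e.g. 336
//     const int patch_size = clip_get_patch_size(ctx);   // e.g. 14
//     const int per_side   = image_size / patch_size;    // patches per side
//     const int n_patches  = per_side * per_side;        // patches before any merging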

// TODO: should be enum, not string
const char * clip_patch_merge_type(const struct clip_ctx * ctx);

const int32_t * clip_image_grid(const struct clip_ctx * ctx);
size_t get_clip_image_grid_size(const struct clip_ctx * ctx);

int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img);

// for M-RoPE, this will be the number of token positions in X and Y directions
// for other models, X will be the total number of tokens and Y will be 1
int clip_n_output_tokens_x(const struct clip_ctx * ctx, struct clip_image_f32 * img);
int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 * img);

// this should be equal to the embedding dimension of the text model
int clip_n_mmproj_embd(const struct clip_ctx * ctx);
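
// Sketch only: sizing a destination buffer for the projected embeddings of one
// preprocessed image. It assumes the output is n_output_tokens x n_mmproj_embd
// floats; img_f32 stands for a preprocessed image (see the batch accessors
// further down) and <stdlib.h> is needed for malloc.
//
//     const int n_tok  = clip_n_output_tokens(ctx, img_f32);
//     const int n_embd = clip_n_mmproj_embd(ctx);
//     float * vec = (float *) malloc((size_t) n_tok * n_embd * sizeof(float));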

struct clip_image_size      * clip_image_size_init(void);
struct clip_image_u8        * clip_image_u8_init (void);
struct clip_image_f32       * clip_image_f32_init(void);
struct clip_image_f32_batch * clip_image_f32_batch_init(void); // only used by libllava

// nx, ny are the output image dimensions
unsigned char * clip_image_u8_get_data(struct clip_image_u8 * img, uint32_t * nx, uint32_t * ny);

void clip_image_size_free (struct clip_image_size * img_size);
void clip_image_u8_free (struct clip_image_u8  * img);
void clip_image_f32_free(struct clip_image_f32 * img);
void clip_image_u8_batch_free (struct clip_image_u8_batch  * batch);
void clip_image_f32_batch_free(struct clip_image_f32_batch * batch);

// use for accessing underlying data of clip_image_f32_batch
size_t clip_image_f32_batch_n_images(const struct clip_image_f32_batch * batch); // equivalent to batch->size()
size_t clip_image_f32_batch_nx(const struct clip_image_f32_batch * batch, int idx); // equivalent to batch[idx]->nx
size_t clip_image_f32_batch_ny(const struct clip_image_f32_batch * batch, int idx); // equivalent to batch[idx]->ny
struct clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch * batch, int idx); // equivalent to batch[idx]->data
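
// Sketch only: walking a preprocessed batch through the accessors above. Here
// "batch" is assumed to have been filled by clip_image_preprocess (declared
// further down).
//
//     const size_t n = clip_image_f32_batch_n_images(batch);
//     for (int i = 0; i < (int) n; i++) {
//         struct clip_image_f32 * slice = clip_image_f32_get_img(batch, i);
//         const size_t nx = clip_image_f32_batch_nx(batch, i);
//         const size_t ny = clip_image_f32_batch_ny(batch, i);
//         // ... slice is nx x ny and ready to be encoded ...
//     }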

/**
 * Build image from pixels decoded by other libraries instead of stb_image.h for better performance.
 * The memory layout is RGBRGBRGB..., input buffer length must be 3*nx*ny bytes
 */
void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, struct clip_image_u8 * img);
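
// Sketch only: wrapping an RGB buffer decoded elsewhere. The 640x480 size and
// the my_decoded_rgb pointer are hypothetical; the buffer must hold exactly
// 3*nx*ny bytes in RGBRGB... order, as stated above.
//
//     const int nx = 640, ny = 480;
//     const unsigned char * rgb = my_decoded_rgb; // hypothetical decoder output
//     struct clip_image_u8 * img = clip_image_u8_init();
//     clip_build_img_from_pixels(rgb, nx, ny, img);
//     // ... preprocess/encode img, then clip_image_u8_free(img) ...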

bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img);

/** interpret bytes as an image file with length bytes_length, and use the result to populate img */
bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);

/** preprocess img and store the result in res_imgs, pad_to_square may be overridden to false depending on model configuration */
bool clip_image_preprocess(struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32_batch * res_imgs);

struct ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx);

bool clip_image_encode      (struct clip_ctx * ctx, int n_threads, struct clip_image_f32 * img, float * vec);
bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, const struct clip_image_f32_batch * imgs, float * vec);
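
// Sketch only: one possible end-to-end flow for a single image file. The file
// name, thread count, and error handling are placeholders, and <stdlib.h> is
// assumed for malloc/free; clip_image_batch_encode is an alternative to the
// per-slice loop.
//
//     struct clip_image_u8 * img = clip_image_u8_init();
//     if (!clip_image_load_from_file("input.jpg", img)) { /* handle error */ }
//
//     struct clip_image_f32_batch * batch = clip_image_f32_batch_init();
//     if (!clip_image_preprocess(ctx, img, batch)) { /* handle error */ }
//
//     const int n_embd = clip_n_mmproj_embd(ctx);
//     for (int i = 0; i < (int) clip_image_f32_batch_n_images(batch); i++) {
//         struct clip_image_f32 * slice = clip_image_f32_get_img(batch, i);
//         const int n_tok = clip_n_output_tokens(ctx, slice);
//         float * vec = (float *) malloc((size_t) n_tok * n_embd * sizeof(float));
//         clip_image_encode(ctx, /* n_threads = */ 4, slice, vec);
//         // ... hand vec (n_tok x n_embd floats) to the text model ...
//         free(vec);
//     }
//
//     clip_image_f32_batch_free(batch);
//     clip_image_u8_free(img);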

int clip_is_minicpmv(const struct clip_ctx * ctx);
bool clip_is_glm(const struct clip_ctx * ctx);
bool clip_is_qwen2vl(const struct clip_ctx * ctx);
bool clip_is_llava(const struct clip_ctx * ctx);
bool clip_is_gemma3(const struct clip_ctx * ctx);
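
// Sketch only: how a caller might branch on the model family, e.g. to build a
// 2-D position grid for an M-RoPE-style projector using the token-count getters
// above. img_f32 is again a preprocessed image; position handling itself is up
// to the caller.
//
//     if (clip_is_qwen2vl(ctx)) {
//         const int nx = clip_n_output_tokens_x(ctx, img_f32);
//         const int ny = clip_n_output_tokens_y(ctx, img_f32);
//         // ... build position ids over the nx x ny token grid ...
//     } else {
//         const int n_tok = clip_n_output_tokens(ctx, img_f32);
//         // ... sequential positions 0 .. n_tok-1 ...
//     }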

bool clip_encode_float_image (struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec);