	llava-cli: fix base64 prompt (#7248)
examples/llava/llava-cli.cpp
@@ -300,6 +300,19 @@ int main(int argc, char ** argv) {
         return 1;
     }
 
+    if (prompt_contains_image(params.prompt)) {
+        auto ctx_llava = llava_init_context(&params, model);
+
+        auto image_embed = load_image(ctx_llava, &params, "");
+
+        // process the prompt
+        process_prompt(ctx_llava, image_embed, &params, params.prompt);
+
+        llama_print_timings(ctx_llava->ctx_llama);
+        llava_image_embed_free(image_embed);
+        ctx_llava->model = NULL;
+        llava_free(ctx_llava);
+    } else {
         for (auto & image : params.image) {
             auto ctx_llava = llava_init_context(&params, model);
 
@@ -317,6 +330,8 @@ int main(int argc, char ** argv) {
             ctx_llava->model = NULL;
             llava_free(ctx_llava);
         }
+    }
+
     llama_free_model(model);
 
     return 0;
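For context: llava-cli accepts a prompt that embeds the image itself as base64, but main() only iterated over files passed via --image, so a prompt carrying only an inline base64 image was never processed. This commit adds the missing branch, loading the embedded image (note the empty filename passed to load_image) and processing the prompt once. The sketch below shows how detection and extraction of such an inline base64 payload can work; it assumes the <img src="data:image/jpeg;base64,..."> convention used for llava-cli base64 prompts, and the helper names and exact parsing are illustrative, not the implementation in llava-cli.cpp.

// Sketch: detecting and extracting an inline base64 image from a prompt.
// Assumes the <img src="data:image/jpeg;base64,..."> tag convention; the
// helper names here are illustrative, not the ones in llava-cli.cpp.
#include <cstdio>
#include <string>

static const std::string BASE64_IMG_BEGIN = "<img src=\"data:image/jpeg;base64,";
static const std::string BASE64_IMG_END   = "\">";

// True when the prompt embeds an image inline: the case the commit now
// routes into its own branch instead of the --image loop.
static bool prompt_has_inline_image(const std::string & prompt) {
    return prompt.find(BASE64_IMG_BEGIN) != std::string::npos;
}

// Return the raw base64 payload between the tag markers, or "" if absent.
static std::string extract_base64_payload(const std::string & prompt) {
    const size_t tag = prompt.find(BASE64_IMG_BEGIN);
    if (tag == std::string::npos) {
        return "";
    }
    const size_t payload = tag + BASE64_IMG_BEGIN.size();
    const size_t end     = prompt.find(BASE64_IMG_END, payload);
    if (end == std::string::npos) {
        return "";
    }
    return prompt.substr(payload, end - payload);
}

int main() {
    const std::string prompt =
        "describe this: <img src=\"data:image/jpeg;base64,QUJD\"> briefly";
    if (prompt_has_inline_image(prompt)) {
        // The payload would be base64-decoded and handed to the image
        // encoder; in llava-cli that work happens inside load_image().
        printf("payload: %s\n", extract_base64_payload(prompt).c_str());
    }
    return 0;
}

With a check like this, the new branch in main() fires exactly when the prompt carries its own image, and falls back to the --image loop otherwise.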
Author: k.h.lai