	common : reimplement logging (#9418)
https://github.com/ggerganov/llama.cpp/pull/9418
@@ -7,9 +7,12 @@
#include "llama.h"
#include "ggml.h"

#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
#include <iostream> // TODO: remove me

struct llava_context {
    struct clip_ctx * ctx_clip = NULL;
@@ -18,14 +21,8 @@ struct llava_context {
};

static void show_additional_info(int /*argc*/, char ** argv) {
    LOG_TEE("\nexample usage:\n\n%s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
    LOG_TEE("\nnote: a lower temperature value like 0.1 is recommended for better quality.\n");
}

static void llama_log_callback_logTee(ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    LOG_TEE("%s", text);
    LOG("\nexample usage:\n\n%s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
    LOG("\nnote: a lower temperature value like 0.1 is recommended for better quality.\n");
}

static struct llama_model * llava_init(gpt_params * params) {
@@ -36,7 +33,7 @@ static struct llama_model * llava_init(gpt_params * params) {

    llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
    if (model == NULL) {
        LOG_TEE("%s: error: unable to load model\n" , __func__);
        LOG_ERR("%s: unable to load model\n" , __func__);
        return NULL;
    }
    return model;
@@ -51,7 +48,7 @@ static struct llava_context * llava_init_context(gpt_params * params, llama_mode
    llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
    if (params->n_ctx < 2048) {
        // warn user here, "Image processing requires at least 2048 context, setting context to 2048"
        LOG_TEE("%s: warn: Image processing requires at least 2048 context, setting context to 2048\n" , __func__);
        LOG_WRN("%s: Image processing requires at least 2048 context, setting context to 2048\n" , __func__);
        ctx_params.n_ctx = 2048;
    } else {
        ctx_params.n_ctx = params->n_ctx;
@@ -60,11 +57,11 @@ static struct llava_context * llava_init_context(gpt_params * params, llama_mode
    llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);

    if (ctx_llama == NULL) {
        LOG_TEE("%s: error: failed to create the llama_context\n" , __func__);
        LOG_ERR("%s: failed to create the llama_context\n" , __func__);
        return NULL;
    }

    auto ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));
    auto * ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));

    ctx_llava->ctx_llama = ctx_llama;
    ctx_llava->model = model;
@@ -89,7 +86,7 @@ static struct clip_ctx * clip_init_context(gpt_params * params) {
    if (prompt.empty()) {
        prompt = "describe the image in detail.";
    }
    auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);
    auto * ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);
    return ctx_clip;
}

@@ -101,7 +98,7 @@ static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_toke
            n_eval = n_batch;
        }
        if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
            LOG_TEE("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
            LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
            return false;
        }
        *n_past += n_eval;
@@ -125,7 +122,7 @@ static void process_eval_image_embed(struct llava_context * ctx_llava, const str
    float * image_embed = (float *)malloc(clip_embd_nbytes(ctx_llava->ctx_clip));
    std::memcpy(image_embed, embeds->embed + idx * clip_n_patches(ctx_llava->ctx_clip) * clip_n_mmproj_embd(ctx_llava->ctx_clip), clip_embd_nbytes(ctx_llava->ctx_clip));

    auto slice_embed = (llava_image_embed*)malloc(sizeof(llava_image_embed));
    auto * slice_embed = (llava_image_embed*)malloc(sizeof(llava_image_embed));
    slice_embed->embed = image_embed;
    slice_embed->n_image_pos = clip_n_patches(ctx_llava->ctx_clip);
    llava_eval_image_embed(ctx_llava->ctx_llama, slice_embed, n_batch, n_past);
@@ -143,7 +140,7 @@ static void process_image(struct llava_context * ctx_llava, struct llava_image_e
    else if (has_minicpmv_projector == 3) {
        system_prompt = "<|im_start|>user\n";
    }
    LOG_TEE("%s: image token past: %d\n", __func__, n_past);
    LOG_INF("%s: image token past: %d\n", __func__, n_past);
    eval_string(ctx_llava->ctx_llama, (system_prompt+"<image>").c_str(), params->n_batch, &n_past, false);
    process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
    eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
@@ -162,7 +159,7 @@ static void process_image(struct llava_context * ctx_llava, struct llava_image_e
        }
        eval_string(ctx_llava->ctx_llama, std::string("</slice>").c_str(), params->n_batch, &n_past, false);
    }
    LOG_TEE("%s: image token past: %d\n", __func__, n_past);
    LOG_INF("%s: image token past: %d\n", __func__, n_past);
}

static const char * sample(struct gpt_sampler * smpl,
@@ -181,42 +178,42 @@ static const char * sample(struct gpt_sampler * smpl,
}

static struct llava_context * minicpmv_init(gpt_params * params, const std::string & fname, int &n_past){
    auto ctx_clip = clip_init_context(params);
    auto embeds = llava_image_embed_make_with_filename(ctx_clip, params->cpuparams.n_threads, fname.c_str());
    auto * ctx_clip = clip_init_context(params);
    auto * embeds = llava_image_embed_make_with_filename(ctx_clip, params->cpuparams.n_threads, fname.c_str());
    if (!embeds) {
        std::cerr << "error: failed to load image " << fname << ". Terminating\n\n";
        LOG_ERR("failed to load image %s. Terminating\n\n", fname.c_str());
        return NULL;
    }

    // process the prompt
    if (params->prompt.empty() && params->interactive == false) {
        LOG_TEE("prompt should be given or interactive mode should be on");
        LOG_ERR("prompt should be given or interactive mode should be on");
        return NULL;
    }

    auto model = llava_init(params);
    auto * model = llava_init(params);
    if (model == NULL) {
        fprintf(stderr, "%s: error: failed to init minicpmv model\n", __func__);
        return NULL;
    }
    const int64_t t_llava_init_start_us = ggml_time_us();
    auto ctx_llava = llava_init_context(params, model);
    auto * ctx_llava = llava_init_context(params, model);
    ctx_llava->ctx_clip = ctx_clip;
    const int64_t t_llava_init_end_us = ggml_time_us();
    float t_llava_init_ms = (t_llava_init_end_us - t_llava_init_start_us) / 1000.0;
    LOG_TEE("\n%s: llava init in %8.2f ms.\n", __func__, t_llava_init_ms);
    LOG_INF("%s: llava init in %8.2f ms.\n", __func__, t_llava_init_ms);

    const int64_t t_process_image_start_us = ggml_time_us();
    process_image(ctx_llava, embeds, params, n_past);
    const int64_t t_process_image_end_us = ggml_time_us();
    float t_process_image_ms = (t_process_image_end_us - t_process_image_start_us) / 1000.0;
    LOG_TEE("\n%s: llama process image in %8.2f ms.\n", __func__, t_process_image_ms);
    LOG_INF("%s: llama process image in %8.2f ms.\n", __func__, t_process_image_ms);

    llava_image_embed_free(embeds);
    return ctx_llava;
}

static struct gpt_sampler * llama_init(struct llava_context * ctx_llava, gpt_params * params, std::string prompt, int &n_past, bool is_first = false){
static struct gpt_sampler * llama_init(struct llava_context * ctx_llava, gpt_params * params, const std::string & prompt, int & n_past, bool is_first = false){
    std::string user_prompt = prompt;
    int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
    if (!is_first) {
@@ -238,7 +235,7 @@ static struct gpt_sampler * llama_init(struct llava_context * ctx_llava, gpt_par

    // generate the response

    LOG_TEE("\n");
    LOG_INF("\n");

    struct gpt_sampler * smpl = gpt_sampler_init(ctx_llava->model, params->sparams);
    return smpl;
@@ -259,12 +256,7 @@ int main(int argc, char ** argv) {
        return 1;
    }

#ifndef LOG_DISABLE_LOGS
    log_set_target(log_filename_generator("llava", "log"));
    LOG_TEE("Log start\n");
    log_dump_cmdline(argc, argv);
    llama_log_set(llama_log_callback_logTee, nullptr);
#endif // LOG_DISABLE_LOGS
    gpt_init();

    if (params.mmproj.empty() || (params.image.empty())) {
        show_additional_info(argc, argv);
@@ -273,21 +265,23 @@ int main(int argc, char ** argv) {

    for (auto & image : params.image) {
        int n_past = 0;
        auto ctx_llava = minicpmv_init(&params, image, n_past);
        auto * ctx_llava = minicpmv_init(&params, image, n_past);

        if (!params.prompt.empty()) {
            LOG_TEE("<user>%s\n", params.prompt.c_str());
            LOG_TEE("<assistant>");
            auto smpl = llama_init(ctx_llava, &params, params.prompt.c_str(), n_past, true);
            LOG("<user>%s\n", params.prompt.c_str());
            LOG("<assistant>");
            auto * smpl = llama_init(ctx_llava, &params, params.prompt, n_past, true);
            const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
            std::string response = "";
            std::string response;
            bool have_tmp = false;
            for (int i = 0; i < max_tgt_len; i++) {
                auto tmp = llama_loop(ctx_llava, smpl, n_past);
                const auto * tmp = llama_loop(ctx_llava, smpl, n_past);
                response += tmp;
                if (strcmp(tmp, "</s>") == 0){
                    if(!have_tmp)continue;
                    else break;
                    if (!have_tmp) {
                        continue;
                    }
                    break;
                }
                if (strstr(tmp, "###")) break; // Yi-VL behavior
                have_tmp = true;
@@ -299,15 +293,15 @@ int main(int argc, char ** argv) {
            gpt_sampler_free(smpl);
        }else {
            while (true) {
                LOG_TEE("<user>");
                LOG("<user>");
                std::string prompt;
                std::getline(std::cin, prompt);
                LOG_TEE("<assistant>");
                auto smpl = llama_init(ctx_llava, &params, prompt, n_past, true);
                LOG("<assistant>");
                auto * smpl = llama_init(ctx_llava, &params, prompt, n_past, true);
                const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
                std::string response = "";
                std::string response;
                for (int i = 0; i < max_tgt_len; i++) {
                    auto tmp = llama_loop(ctx_llava, smpl, n_past);
                    const auto * tmp = llama_loop(ctx_llava, smpl, n_past);
                    response += tmp;
                    if (strcmp(tmp, "</s>") == 0) break;
                    if (strstr(tmp, "###")) break; // Yi-VL behavior
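Throughout the diff, the old LOG_TEE calls and the per-example llama_log_callback_logTee hook give way to the common logging macros (LOG, LOG_INF, LOG_WRN, LOG_ERR), and the manual #ifndef LOG_DISABLE_LOGS setup in main() is replaced by a single gpt_init() call. Note how the "error:" and "warn:" prefixes disappear from the format strings: severity now travels with the macro itself. The sketch below illustrates only that calling pattern; the macro definitions are stand-ins so the snippet compiles on its own (the real macros come from common/log.h and route through the shared logger), and load_model is a hypothetical helper used just to show the call sites.

```cpp
// Minimal, self-contained sketch of the new logging call pattern.
// The macro definitions below are illustrative stand-ins only; the real ones
// live in common/log.h and write through the shared logger.
#include <cstdio>

#define LOG(...)     fprintf(stdout, __VA_ARGS__)   // plain output: usage text, <user>/<assistant> markers
#define LOG_INF(...) fprintf(stdout, __VA_ARGS__)   // informational messages (timings, progress)
#define LOG_WRN(...) fprintf(stderr, __VA_ARGS__)   // warnings, no "warn:" prefix in the format string
#define LOG_ERR(...) fprintf(stderr, __VA_ARGS__)   // errors, no "error:" prefix in the format string

// Hypothetical helper, only here to demonstrate the call sites.
static bool load_model(const char * path) {
    if (path == nullptr) {
        LOG_ERR("%s: unable to load model\n", __func__);
        return false;
    }
    LOG_INF("%s: loaded model from %s\n", __func__, path);
    return true;
}

int main(int /*argc*/, char ** argv) {
    // In the real example this is preceded by gpt_init(), which sets up the
    // common logger; that setup is omitted from this stand-alone sketch.
    LOG("\nexample usage:\n\n%s -m <model.gguf> --image <image.jpg>\n", argv[0]);

    if (!load_model("model.gguf")) {
        return 1;
    }
    LOG_WRN("%s: Image processing requires at least 2048 context, setting context to 2048\n", __func__);
    return 0;
}
```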