mirror of https://github.com/ggml-org/llama.cpp.git
synced 2025-11-02 09:12:03 +00:00
Merge branch 'master' into compilade/refactor-kv-cache
@@ -1,3 +1,4 @@
+#include "arg.h"
 #include "common.h"
 #include "llama.h"
 
@@ -17,9 +18,7 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
-static void print_usage(int argc, char ** argv, const gpt_params & params) {
-    gpt_params_print_usage(argc, argv, params);
-
+static void print_usage(int, char ** argv) {
     LOG_TEE("\nexample usage:\n");
     LOG_TEE("\n    %s \\\n"
             "       -m model.gguf -f some-text.txt [-o imatrix.dat] [--process-output] [--verbosity 1] \\\n"
@@ -579,8 +578,7 @@ int main(int argc, char ** argv) {
     params.logits_all = true;
     params.verbosity = 1;
 
-    if (!gpt_params_parse(argc, argv, params)) {
-        print_usage(argc, argv, params);
+    if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_IMATRIX, print_usage)) {
         return 1;
     }
 
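
Putting the two argument-parsing hunks together: a minimal sketch, assembled only from the lines shown above and assuming the post-refactor common API ("arg.h"/"common.h" providing gpt_params, gpt_params_parse, LLAMA_EXAMPLE_IMATRIX, LOG_TEE). The usage text is abbreviated for illustration; this is not the complete imatrix example.

    #include "arg.h"
    #include "common.h"

    // example-specific usage callback: the parser now invokes this itself on failure
    static void print_usage(int, char ** argv) {
        LOG_TEE("\nexample usage:\n");
        LOG_TEE("\n    %s -m model.gguf -f some-text.txt [-o imatrix.dat]\n", argv[0]);
    }

    int main(int argc, char ** argv) {
        gpt_params params;

        params.logits_all = true;
        params.verbosity  = 1;

        // the example id and the usage callback are passed straight to the parser,
        // so the caller no longer prints usage itself before returning
        if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_IMATRIX, print_usage)) {
            return 1;
        }

        // ... model loading and imatrix collection elided ...
        return 0;
    }
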
@@ -638,7 +636,8 @@ int main(int argc, char ** argv) {
 
     g_collector.save_imatrix();
 
-    llama_print_timings(ctx);
+    LOG_TEE("\n");
+    llama_perf_context_print(ctx);
 
     llama_free(ctx);
     llama_free_model(model);
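
A minimal sketch of the teardown change in the last hunk, under the same assumptions; the wrapper function below is hypothetical (not in the diff) and only illustrates that llama_print_timings(ctx) is replaced by an explicit newline plus llama_perf_context_print(ctx):

    #include "common.h"
    #include "llama.h"

    // hypothetical helper showing the new shutdown order
    static void print_perf_and_free(llama_context * ctx, llama_model * model) {
        LOG_TEE("\n");
        llama_perf_context_print(ctx);  // replaces the removed llama_print_timings(ctx)

        llama_free(ctx);
        llama_free_model(model);
    }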