llama : reuse compute graphs

ggml-ci
This commit is contained in:
Georgi Gerganov
2025-07-01 15:59:43 +03:00
parent bac8bed248
commit 76681e3c73
17 changed files with 458 additions and 187 deletions

View File

@@ -1464,6 +1464,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
params.swa_full = true;
}
).set_env("LLAMA_ARG_SWA_FULL"));
add_opt(common_arg(
{"--graph-reuse", "-gr"},
        string_format("reuse previous compute graphs when possible (default: %s) "
            "[(more info)](https://github.com/ggml-org/llama.cpp/pull/14482)", params.graph_reuse ? "true" : "false"),
[](common_params & params) {
params.graph_reuse = true;
}
).set_env("LLAMA_ARG_GRAPH_REUSE"));
add_opt(common_arg(
{"--no-context-shift"},
string_format("disables context shift on infinite text generation (default: %s)", params.ctx_shift ? "disabled" : "enabled"),

View File

@@ -1157,6 +1157,7 @@ struct llama_context_params common_context_params_to_llama(const common_params &
cparams.no_perf = params.no_perf;
cparams.op_offload = !params.no_op_offload;
cparams.swa_full = params.swa_full;
cparams.graph_reuse = params.graph_reuse;
cparams.type_k = params.cache_type_k;
cparams.type_v = params.cache_type_v;

View File

@@ -330,6 +330,7 @@ struct common_params {
bool no_perf = false; // disable performance metrics
    bool ctx_shift             = true;  // context shift on infinite text generation
bool swa_full = false; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
bool graph_reuse = false; // reuse previous compute graphs when possible
bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
bool use_mmap = true; // use mmap for faster loads