llama : reuse compute graphs

ggml-ci
This commit is contained in:
Georgi Gerganov
2025-07-01 15:59:43 +03:00
parent bac8bed248
commit 76681e3c73
17 changed files with 458 additions and 187 deletions

View File

@@ -330,6 +330,7 @@ struct common_params {
bool no_perf = false; // disable performance metrics
bool ctx_shift = true; // context shift on infinite text generation
bool swa_full = false; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
bool graph_reuse = false; // reuse previous compute graphs when possible
bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
bool use_mmap = true; // use mmap for faster loads