From 9d262f4bad0d37838100133537aaf0a83835ed12 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Tue, 19 Aug 2025 08:45:26 +0300
Subject: [PATCH] server : remove swa_full warning (#15399)

---
 src/llama-context.cpp | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/src/llama-context.cpp b/src/llama-context.cpp
index 7d7abad5d4..1ebfc88ab6 100644
--- a/src/llama-context.cpp
+++ b/src/llama-context.cpp
@@ -145,11 +145,6 @@ llama_context::llama_context(
                 __func__, n_ctx_per_seq, hparams.n_ctx_train);
     }
 
-    if (!params.swa_full && cparams.n_seq_max > 1 && hparams.is_swa_any()) {
-        LLAMA_LOG_WARN("%s: requested n_seq_max (%u) > 1, but swa_full is not enabled -- performance may be degraded: %s\n",
-                __func__, cparams.n_seq_max, "https://github.com/ggml-org/llama.cpp/pull/13845#issuecomment-2924800573");
-    }
-
     if (!hparams.vocab_only) {
         // GPU backends
         for (auto * dev : model.devices) {