1 parent d17a809 commit 487a5e0
src/llama-context.cpp
@@ -123,7 +123,7 @@ llama_context::llama_context(
                 __func__, n_ctx_per_seq, hparams.n_ctx_train);
     }

-    if (!params.swa_full && cparams.n_seq_max > 1) {
+    if (!params.swa_full && cparams.n_seq_max > 1 && hparams.is_swa_any()) {
         LLAMA_LOG_WARN("%s: requested n_seq_max (%u) > 1, but swa_full is not enabled -- performance may be degraded: %s\n",
                 __func__, cparams.n_seq_max, "https://github.com/ggml-org/llama.cpp/pull/13845#issuecomment-2924800573");
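The change narrows the existing warning: previously any model loaded with n_seq_max > 1 and swa_full disabled triggered it, even models with no sliding-window-attention (SWA) layers; with hparams.is_swa_any() added to the condition, the warning fires only for models that actually use SWA. Below is a minimal, self-contained sketch of that gated-warning pattern. The stub types (hparams_stub, cparams_stub, params_stub) and the maybe_warn helper are hypothetical stand-ins for illustration, not llama.cpp's real structures; only the identifiers that appear in the diff are taken from the source.

// Sketch of the gated warning added in this commit. The types here are
// stand-ins; they only mirror the names that appear in the diff above.
#include <cstdint>
#include <cstdio>
#include <vector>

struct hparams_stub {
    std::vector<uint32_t> swa_window; // per-layer sliding-window size, 0 = full attention (assumed layout)

    // true if at least one layer uses sliding-window attention (SWA)
    bool is_swa_any() const {
        for (uint32_t w : swa_window) {
            if (w != 0) {
                return true;
            }
        }
        return false;
    }
};

struct cparams_stub {
    uint32_t n_seq_max;
};

struct params_stub {
    bool swa_full;
};

static void maybe_warn(const params_stub & params, const cparams_stub & cparams, const hparams_stub & hparams) {
    // After the change, the warning is emitted only for models that actually use SWA.
    if (!params.swa_full && cparams.n_seq_max > 1 && hparams.is_swa_any()) {
        std::fprintf(stderr,
            "%s: requested n_seq_max (%u) > 1, but swa_full is not enabled -- performance may be degraded\n",
            __func__, cparams.n_seq_max);
    }
}

int main() {
    hparams_stub no_swa  { { 0, 0, 0, 0 } };       // e.g. a model with no SWA layers
    hparams_stub has_swa { { 0, 4096, 0, 4096 } }; // e.g. a model with interleaved SWA layers

    params_stub  params  { /*swa_full=*/false };
    cparams_stub cparams { /*n_seq_max=*/4 };

    maybe_warn(params, cparams, no_swa);   // silent: no SWA layers, so the warning no longer applies
    maybe_warn(params, cparams, has_swa);  // warns: SWA model with swa_full off and n_seq_max > 1
    return 0;
}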