llama : remove check flash_attn with lora (#11104)

Author: Xuan Son Nguyen
Date: 2025-01-06 13:41:12 +01:00
Committed by: GitHub
parent 96a1dc27c3
commit 09186fabbe


@@ -11519,13 +11519,7 @@ int32_t llama_lora_adapter_set(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter,
             float scale) {
-    if (ctx->cparams.flash_attn) {
-        LLAMA_LOG_ERROR("%s: flash_attn is not compatible with LoRA\n", __func__);
-        return -1;
-    }
     ctx->lora_adapters[adapter] = scale;
     return 0;
 }
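
For context, a minimal usage sketch of what this change permits: attaching a LoRA adapter to a context created with flash_attn enabled, where llama_lora_adapter_set previously returned -1. Only llama_lora_adapter_set and struct llama_lora_adapter appear in the diff above; the loader, context, and adapter-init calls, as well as the file paths, are assumptions based on the llama.cpp C API of this period and may differ between versions.

// Sketch only: after this commit, setting a LoRA adapter on a flash_attn-enabled
// context is expected to succeed (return 0) rather than fail with -1.
// Calls other than llama_lora_adapter_set are assumed API names, not verified
// against this exact revision.
#include "llama.h"
#include <cstdio>

int main() {
    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);  // assumed loader name
    if (model == NULL) return 1;

    llama_context_params cparams = llama_context_default_params();
    cparams.flash_attn = true;  // before this commit, this combination was rejected
    llama_context * ctx = llama_new_context_with_model(model, cparams);       // assumed ctor name
    if (ctx == NULL) return 1;

    // Load a LoRA adapter and attach it at full strength (scale = 1.0f).
    llama_lora_adapter * adapter = llama_lora_adapter_init(model, "adapter.gguf");
    if (adapter == NULL || llama_lora_adapter_set(ctx, adapter, 1.0f) != 0) {
        fprintf(stderr, "failed to apply LoRA adapter\n");
        return 1;
    }

    // ... run inference with flash attention and the LoRA adapter applied ...

    llama_free(ctx);
    llama_free_model(model);
    return 0;
}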