From 09186fabbe05236f2b9446ba6c643cb737540d10 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Mon, 6 Jan 2025 13:41:12 +0100
Subject: [PATCH] llama : remove check flash_attn with lora (#11104)

---
 src/llama.cpp | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index c162c31a67a40..ebd6e3b2941c5 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -11519,13 +11519,7 @@ int32_t llama_lora_adapter_set(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter,
             float scale) {
-    if (ctx->cparams.flash_attn) {
-        LLAMA_LOG_ERROR("%s: flash_attn is not compatible with LoRA\n", __func__);
-        return -1;
-    }
-
     ctx->lora_adapters[adapter] = scale;
-
     return 0;
 }
 
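
Below is a minimal caller-side sketch (not part of the patch) illustrating the behavior change: with the check removed, llama_lora_adapter_set no longer returns -1 when the context was created with flash_attn enabled. File paths and the scale value are placeholders; the API names are the pre-rename llama.cpp C API used by this commit.

// usage_sketch.cpp -- illustrative only, assumes "model.gguf" and "adapter.gguf" exist
#include "llama.h"

int main() {
    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);

    llama_context_params cparams = llama_context_default_params();
    cparams.flash_attn = true; // previously made llama_lora_adapter_set fail
    llama_context * ctx = llama_new_context_with_model(model, cparams);

    llama_lora_adapter * adapter = llama_lora_adapter_init(model, "adapter.gguf");

    // Before this patch: logged "flash_attn is not compatible with LoRA" and returned -1.
    // After this patch: the adapter is registered on the context and 0 is returned.
    int32_t res = llama_lora_adapter_set(ctx, adapter, 1.0f);

    llama_free(ctx);
    llama_free_model(model);
    return res;
}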