From c421ac072d46172ab18924e1e8be53680b54ed3b Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Tue, 22 Oct 2024 13:08:41 +0200
Subject: [PATCH] lora : warn user if new token is added in the adapter (#9948)

---
 convert_lora_to_gguf.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/convert_lora_to_gguf.py b/convert_lora_to_gguf.py
index 439a78de108ca..bc68f68afb768 100755
--- a/convert_lora_to_gguf.py
+++ b/convert_lora_to_gguf.py
@@ -348,6 +348,9 @@ def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
                     if ".base_layer.weight" in name:
                         continue
                     logger.error(f"Unexpected name '{name}': Not a lora_A or lora_B tensor")
+                    if ".embed_tokens.weight" in name or ".lm_head.weight" in name:
+                        logger.error("Embeddings is present in the adapter. This can be due to new tokens added during fine tuning")
+                        logger.error("Hint: if you are using TRL, make sure not to call setup_chat_format()")
                     sys.exit(1)

                 if base_name in tensor_map:
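
The check above triggers when the adapter contains full embedding or output-head tensors, which typically happens when new tokens were added during fine-tuning (for example via TRL's setup_chat_format()), causing the resized embed_tokens and lm_head matrices to be saved alongside the LoRA weights. Below is a minimal sketch, not part of the patch, showing how one might scan an adapter file for such tensors before running the converter. The file name adapter_model.safetensors and the helper adapter_has_embeddings() are illustrative assumptions, not part of the converter.

# Minimal sketch (assumption: the PEFT adapter is stored as a local
# safetensors file, e.g. adapter_model.safetensors).
from safetensors import safe_open

def adapter_has_embeddings(path: str) -> bool:
    # Open the safetensors file and inspect tensor names only;
    # no tensor data is loaded into memory.
    with safe_open(path, framework="pt") as f:
        return any(
            ".embed_tokens.weight" in name or ".lm_head.weight" in name
            for name in f.keys()
        )

if adapter_has_embeddings("my-lora-adapter/adapter_model.safetensors"):
    print("Adapter contains embedding/lm_head tensors; convert_lora_to_gguf.py will reject it.")
    print("Likely cause: new tokens added during fine-tuning (e.g. TRL's setup_chat_format()).")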