From 5b15816cf4b625186c4aa2d28cfbed372ee8f18b Mon Sep 17 00:00:00 2001
From: Wing Lian
Date: Thu, 22 Aug 2024 19:16:32 -0400
Subject: [PATCH] drop valueerror as this was from when 4bit required gptq

---
 src/axolotl/utils/models.py | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/src/axolotl/utils/models.py b/src/axolotl/utils/models.py
index 8d24524a23..35005bfce5 100644
--- a/src/axolotl/utils/models.py
+++ b/src/axolotl/utils/models.py
@@ -96,12 +96,6 @@ def check_model_config(cfg: DictDefault, model_config: Union[AutoConfig, DictDef
             "Please make sure to point to a GPTQ model."
         )
 
-    if not cfg.gptq and quant_config_exists and not cfg.load_in_4bit:
-        raise ValueError(
-            "model_config.quantization_config is set but `gptq` flag is not. "
-            "Please use the `gptq` flag to train quantized model or point to a non-quantized model."
-        )
-
     lora_modules_to_save = get_linear_embedding_layers(model_config.model_type)
     if (
         cfg.adapter
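
For context, a minimal sketch (not part of the patch) of the case this change now permits: a checkpoint whose config already carries a quantization_config, loaded without the `gptq` flag and without `load_in_4bit`. Only `check_model_config`, `cfg.gptq`, and `cfg.load_in_4bit` come from the diff above; the `DictDefault` import path is assumed, and the model name and config fields are hypothetical placeholders.

    # Hypothetical sketch: with the ValueError removed, a config that points at a
    # pre-quantized (non-GPTQ) model no longer has to set the `gptq` flag.
    from transformers import AutoConfig

    from axolotl.utils.dict import DictDefault           # assumed location of DictDefault
    from axolotl.utils.models import check_model_config  # module path taken from the diff

    cfg = DictDefault(
        {
            "gptq": False,          # not training a GPTQ model
            "load_in_4bit": False,  # not asking bitsandbytes to quantize at load time
        }
    )

    # "org/prequantized-model" is a placeholder for any checkpoint whose config.json
    # already contains a quantization_config entry.
    model_config = AutoConfig.from_pretrained("org/prequantized-model")

    # Before this patch this call raised the ValueError shown above; after it, the check passes.
    check_model_config(cfg, model_config)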