diff --git a/src/llmcompressor/modifiers/quantization/gptq/base.py b/src/llmcompressor/modifiers/quantization/gptq/base.py
index cb21d5e9..398b1d48 100644
--- a/src/llmcompressor/modifiers/quantization/gptq/base.py
+++ b/src/llmcompressor/modifiers/quantization/gptq/base.py
@@ -218,15 +218,13 @@ def on_initialize(self, state: "State", **kwargs) -> bool:
             )
             return True

-        # failure to trace
-        except torch.fx.proxy.TraceError:
-            model_name = state.model.__class__.__name__
-            column_names = state.data.calib.dataset.column_names
-            warnings.warn(
-                f"Failed to trace {model_name} with dataset {column_names}. "
-                "Falling back to layer_sequential pipeline"
-            )
+        except Exception as exception:
+            if isinstance(exception, torch.fx.proxy.TraceError):
+                model_name = state.model.__class__.__name__
+                column_names = state.data.calib.dataset.column_names
+                warnings.warn(f"Failed to trace {model_name} with {column_names}")
+            warnings.warn("Falling back to layer_sequential pipeline")
             try:
                 run_layer_sequential(
                     state.model,
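
For context, the hunk replaces a narrow `except torch.fx.proxy.TraceError:` clause with a broad `except Exception` handler that inspects the exception type before warning, then retries calibration with the layer_sequential pipeline. Below is a minimal standalone sketch of that fallback pattern; `run_traced_pipeline` and `run_layer_by_layer` are hypothetical stand-ins for the real `run_layer_sequential`-style helpers and are stubbed out so the snippet runs on its own.

```python
import warnings

import torch


# Hypothetical placeholder pipelines, for illustration only.
def run_traced_pipeline(model, dataset):
    # Simulate a symbolic-tracing failure.
    raise torch.fx.proxy.TraceError("symbolic tracing failed")


def run_layer_by_layer(model, dataset):
    pass


def calibrate_with_fallback(model, dataset) -> bool:
    """Try the traced (sequential) pipeline first; on any failure,
    warn and fall back to a layer-by-layer pipeline."""
    try:
        run_traced_pipeline(model, dataset)
        return True

    except Exception as exception:
        # Only trace failures get the detailed "failed to trace" warning;
        # every other exception still reaches the fallback below.
        if isinstance(exception, torch.fx.proxy.TraceError):
            model_name = model.__class__.__name__
            warnings.warn(f"Failed to trace {model_name}")

        warnings.warn("Falling back to layer_sequential pipeline")
        run_layer_by_layer(model, dataset)
        return True
```

One trade-off of this pattern, visible in the hunk as well: because the handler catches `Exception`, failures unrelated to tracing also trigger the layer-by-layer fallback instead of propagating immediately.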