diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py
index a6dc313fbaa172..0d9050f5fad14c 100644
--- a/src/transformers/modeling_utils.py
+++ b/src/transformers/modeling_utils.py
@@ -4192,7 +4192,7 @@ def warn_if_padding_and_no_attention_mask(self, input_ids, attention_mask):
     @property
     def _is_quantized_training_enabled(self):
-        logger.warning(
+        warnings.warn(
            "`_is_quantized_training_enabled` is going to be deprecated in transformers 4.39.0. Please use `model.hf_quantizer.is_trainable` instead",
            FutureWarning,
        )
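
For context, here is a minimal, self-contained sketch of the pattern this diff adopts. The `_Demo` class and its return value are hypothetical and only for illustration; the warning text comes from the diff. Unlike `logger.warning`, `warnings.warn(..., FutureWarning)` goes through Python's warning filters, so the deprecation can be silenced, shown once, or escalated to an error by callers and test suites.

```python
import warnings


class _Demo:
    @property
    def _is_quantized_training_enabled(self):
        # warnings.warn honors the FutureWarning category and Python's
        # warning filters (e.g. `python -W error` turns it into an exception).
        warnings.warn(
            "`_is_quantized_training_enabled` is going to be deprecated in transformers 4.39.0. "
            "Please use `model.hf_quantizer.is_trainable` instead",
            FutureWarning,
        )
        return True  # hypothetical value, stands in for the real property body


# Usage: accessing the property emits the FutureWarning.
warnings.simplefilter("always", FutureWarning)
_ = _Demo()._is_quantized_training_enabled
```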