diff --git a/src/transformers/quantizers/base.py b/src/transformers/quantizers/base.py
index c8eb8bacaa7810..68adc3954df45d 100644
--- a/src/transformers/quantizers/base.py
+++ b/src/transformers/quantizers/base.py
@@ -15,7 +15,6 @@
 from typing import TYPE_CHECKING, Any, Dict, Optional, Union
 
 from ..utils import is_torch_available
-from ..utils.import_utils import _is_package_available
 from ..utils.quantization_config import QuantizationConfigMixin
 
 
@@ -64,8 +63,6 @@ def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
                 f"pass `pre_quantized=True` while knowing what you are doing."
             )
 
-        self.check_packages_compatibility()
-
     def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
         """
         Some quantization methods require to explicitly set the dtype of the model to a
@@ -152,25 +149,6 @@ def validate_environment(self, *args, **kwargs):
         """
         return
 
-    def check_packages_compatibility(self):
-        """
-        Check the compatibility of the quantizer with respect to the current environment. Loops over all packages
-        name under `self.required_packages` and checks if that package is available.
-        """
-        if self.required_packages is not None:
-            non_available_packages = []
-            for package_name in self.required_packages:
-                is_package_available = _is_package_available(package_name)
-                if not is_package_available:
-                    non_available_packages.append(package_name)
-
-            if len(non_available_packages) > 0:
-                raise ValueError(
-                    f"The packages {self.required_packages} are required to use {self.__class__.__name__}"
-                    f" the following packages are missing in your environment: {non_available_packages}, please make sure"
-                    f" to install them in order to use the quantizer."
-                )
-
     def preprocess_model(self, model: "PreTrainedModel", **kwargs):
         """
         Setting model attributes and/or converting model before weights loading. At this point
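
With the generic `check_packages_compatibility` hook removed from the base class, each quantizer is now expected to perform its own dependency checks in `validate_environment`, which the base class leaves as a no-op. Below is a minimal sketch of what such a check could look like in a subclass; the class name `ExampleQuantizer` and the package list are hypothetical, and `importlib.util.find_spec` stands in for the private `_is_package_available` helper so the snippet is self-contained:

```python
import importlib.util


class ExampleQuantizer:  # hypothetical stand-in for a quantizer subclass
    # Illustrative values; a real quantizer would declare its own requirements.
    required_packages = ["bitsandbytes", "accelerate"]

    def validate_environment(self, *args, **kwargs):
        # Collect every required package that cannot be found in the environment.
        missing = [
            name
            for name in self.required_packages or []
            if importlib.util.find_spec(name) is None
        ]
        if missing:
            raise ImportError(
                f"{self.__class__.__name__} requires {self.required_packages}; "
                f"the following packages are missing from your environment: {missing}. "
                f"Please install them to use this quantizer."
            )
```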