Make check dummy inputs import conditional in quantizer
eaidova committed Dec 18, 2024
1 parent a76be08 commit 6d07bb1
Showing 1 changed file with 1 addition and 2 deletions.
optimum/intel/openvino/quantization.py (3 changes: 1 addition & 2 deletions)
@@ -43,8 +43,6 @@
 from transformers import AutoProcessor, AutoTokenizer, DataCollator, PreTrainedModel, default_data_collator
 from transformers.pytorch_utils import Conv1D
 from transformers.utils import is_accelerate_available
-
-from optimum.exporters.onnx.convert import check_dummy_inputs_are_allowed
 from optimum.exporters.tasks import TasksManager
 from optimum.quantization_base import OptimumQuantizer

@@ -524,6 +522,7 @@ def _quantize_torchmodel(

         quantization_config = ov_config.quantization_config
         if isinstance(quantization_config, OVWeightQuantizationConfig):
+            from optimum.exporters.onnx.convert import check_dummy_inputs_are_allowed
             if stateful:
                 # patch model before weight compression
                 model = patch_model_with_bettertransformer(model)
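
A brief note on the pattern this commit applies: the import of check_dummy_inputs_are_allowed moves from module level into the branch of _quantize_torchmodel that actually uses it, so importing optimum.intel.openvino.quantization no longer requires optimum.exporters.onnx.convert unless the torch-model weight-compression path is actually executed. The sketch below illustrates the same deferred-import idea in isolation; the function name and its parameters are hypothetical, and only the import statement itself mirrors the line added by this commit.

def compress_weights_lazily(model, apply_weight_compression: bool):
    # Hypothetical wrapper, for illustration only.
    if apply_weight_compression:
        # Deferred import: the ONNX export helper (and whatever it depends on) is
        # loaded only when this branch runs, not when the enclosing module is imported.
        from optimum.exporters.onnx.convert import check_dummy_inputs_are_allowed  # noqa: F401

        # ... weight-compression logic would validate dummy inputs and compress here ...
    return model

A likely motivation for this placement is that environments which never hit the weight-compression path can import the quantizer module without the ONNX export dependencies being importable.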