Skip to content

Commit

Permalink
fix quant tests
Browse files — browse the repository at this point in the history
eaidova committed Dec 18, 2024
1 parent 3da5328 · commit 3061342
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 7 deletions.
3 changes: 2 additions & 1 deletion optimum/intel/openvino/modeling_seq2seq.py
Original file line number Diff line number Diff line change
Expand Up @@ -652,7 +652,8 @@ def forward(

if "cache_position" in self.input_names:
if cache_position is None:
cache_position = np.arange(self._past_len, self._past_len + input_ids.shape[1])
past_len = self._get_past_length(past_key_values)
cache_position = np.arange(past_len, past_len + input_ids.shape[1])
inputs["cache_position"] = cache_position

if "beam_idx" in self.input_names:
Expand Down
25 changes: 19 additions & 6 deletions tests/openvino/test_quantization.py
Original file line number Diff line number Diff line change
Expand Up @@ -208,8 +208,12 @@ def test_ov_model_static_quantization_with_auto_dataset(
ov_model.save_pretrained(tmp_dir)

if model_cls == OVModelForSpeechSeq2Seq:
models = [ov_model.encoder.model, ov_model.decoder.model]

if ov_model.decoder_with_past is not None:
models.append(ov_model.decoder_with_past.model)
for model, expected_fq, expected_i8 in zip(
(ov_model.encoder.model, ov_model.decoder.model, ov_model.decoder_with_past.model),
models,
expected_fake_quantize,
expected_int8,
):
Expand Down Expand Up @@ -629,7 +633,9 @@ def test_ovmodel_load_with_compressed_weights(self, model_cls, model_type, trust
self.assertEqual(model._openvino_config.dtype, "int8")

if model.export_feature.startswith("text2text-generation"):
models = [model.encoder, model.decoder, model.decoder_with_past]
models = [model.encoder, model.decoder]
if model.decoder_with_past is not None:
models.append(model.decoder_with_past)
elif model.export_feature == "text-to-image":
models = [model.unet, model.vae_encoder, model.vae_decoder]
models.append(model.text_encoder if model_type == "stable-diffusion" else model.text_encoder_2)
Expand Down Expand Up @@ -772,7 +778,9 @@ def test_ovmodel_load_with_uncompressed_weights(self, model_cls, model_type, tru
MODEL_NAMES[model_type], export=True, load_in_8bit=False, trust_remote_code=trust_remote_code
)
if model.export_feature.startswith("text2text-generation"):
models = [model.encoder, model.decoder, model.decoder_with_past]
models = [model.encoder, model.decoder]
if model.decoder_with_past is not None:
models.append(model.decoder_with_past)
elif model.export_feature == "text-to-image":
models = [model.unet, model.vae_encoder, model.vae_decoder]
models.append(model.text_encoder if model_type == "stable-diffusion" else model.text_encoder_2)
Expand Down Expand Up @@ -1205,9 +1213,14 @@ def test_calibration_data_uniqueness(self, model_name, apply_caching):
processor = AutoProcessor.from_pretrained(model_id)

calibration_data = []
ov_model.decoder_with_past.request = InferRequestWrapper(
ov_model.decoder_with_past.request, calibration_data, apply_caching=apply_caching
)
if not ov_model.stateful:
ov_model.decoder_with_past.request = InferRequestWrapper(
ov_model.decoder_with_past.request, calibration_data, apply_caching=apply_caching
)
else:
ov_model.decoder.request = InferRequestWrapper(
ov_model.decoder.request, calibration_data, apply_caching=apply_caching
)
for _ in range(2):
input_features = self._generate_random_audio_data(processor)
ov_model.generate(input_features, max_new_tokens=10, min_new_tokens=10)
Expand Down

0 comments on commit 3061342

Please sign in to comment.