diff --git a/optimum/intel/openvino/modeling_seq2seq.py b/optimum/intel/openvino/modeling_seq2seq.py
index 7f77815f4..68d19cfef 100644
--- a/optimum/intel/openvino/modeling_seq2seq.py
+++ b/optimum/intel/openvino/modeling_seq2seq.py
@@ -652,7 +652,8 @@ def forward(
 
         if "cache_position" in self.input_names:
             if cache_position is None:
-                cache_position = np.arange(self._past_len, self._past_len + input_ids.shape[1])
+                past_len = self._get_past_length(past_key_values)
+                cache_position = np.arange(past_len, past_len + input_ids.shape[1])
             inputs["cache_position"] = cache_position
 
         if "beam_idx" in self.input_names:
diff --git a/tests/openvino/test_quantization.py b/tests/openvino/test_quantization.py
index d02dea3f1..72d4a4801 100644
--- a/tests/openvino/test_quantization.py
+++ b/tests/openvino/test_quantization.py
@@ -208,8 +208,12 @@ def test_ov_model_static_quantization_with_auto_dataset(
         ov_model.save_pretrained(tmp_dir)
 
         if model_cls == OVModelForSpeechSeq2Seq:
+            models = [ov_model.encoder.model, ov_model.decoder.model]
+
+            if ov_model.decoder_with_past is not None:
+                models.append(ov_model.decoder_with_past.model)
             for model, expected_fq, expected_i8 in zip(
-                (ov_model.encoder.model, ov_model.decoder.model, ov_model.decoder_with_past.model),
+                models,
                 expected_fake_quantize,
                 expected_int8,
             ):
@@ -629,7 +633,9 @@ def test_ovmodel_load_with_compressed_weights(self, model_cls, model_type, trust
         self.assertEqual(model._openvino_config.dtype, "int8")
 
         if model.export_feature.startswith("text2text-generation"):
-            models = [model.encoder, model.decoder, model.decoder_with_past]
+            models = [model.encoder, model.decoder]
+            if model.decoder_with_past is not None:
+                models.append(model.decoder_with_past)
         elif model.export_feature == "text-to-image":
             models = [model.unet, model.vae_encoder, model.vae_decoder]
             models.append(model.text_encoder if model_type == "stable-diffusion" else model.text_encoder_2)
@@ -772,7 +778,9 @@ def test_ovmodel_load_with_uncompressed_weights(self, model_cls, model_type, tru
             MODEL_NAMES[model_type], export=True, load_in_8bit=False, trust_remote_code=trust_remote_code
         )
         if model.export_feature.startswith("text2text-generation"):
-            models = [model.encoder, model.decoder, model.decoder_with_past]
+            models = [model.encoder, model.decoder]
+            if model.decoder_with_past is not None:
+                models.append(model.decoder_with_past)
         elif model.export_feature == "text-to-image":
             models = [model.unet, model.vae_encoder, model.vae_decoder]
             models.append(model.text_encoder if model_type == "stable-diffusion" else model.text_encoder_2)
@@ -1205,9 +1213,14 @@ def test_calibration_data_uniqueness(self, model_name, apply_caching):
         processor = AutoProcessor.from_pretrained(model_id)
 
         calibration_data = []
-        ov_model.decoder_with_past.request = InferRequestWrapper(
-            ov_model.decoder_with_past.request, calibration_data, apply_caching=apply_caching
-        )
+        if not ov_model.stateful:
+            ov_model.decoder_with_past.request = InferRequestWrapper(
+                ov_model.decoder_with_past.request, calibration_data, apply_caching=apply_caching
+            )
+        else:
+            ov_model.decoder.request = InferRequestWrapper(
+                ov_model.decoder.request, calibration_data, apply_caching=apply_caching
+            )
         for _ in range(2):
             input_features = self._generate_random_audio_data(processor)
             ov_model.generate(input_features, max_new_tokens=10, min_new_tokens=10)
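
A note on the first hunk: the old code derived cache_position from a self._past_len counter, which can go stale across calls; the patched code recomputes the cached length from past_key_values via _get_past_length (whose body is not shown in this patch), so the positions of newly decoded tokens always continue from the end of the KV cache. A minimal standalone sketch of that arithmetic (make_cache_position is an illustrative stand-in, not optimum-intel API):

import numpy as np

# Illustrative stand-in for the cache_position logic in the patched forward():
# position ids for newly decoded tokens continue from the end of the KV cache.
def make_cache_position(past_len: int, num_new_tokens: int) -> np.ndarray:
    return np.arange(past_len, past_len + num_new_tokens)

# Prefill with an empty cache: positions [0, 1, 2] for three prompt tokens.
assert make_cache_position(0, 3).tolist() == [0, 1, 2]
# Decoding one token with five cached tokens: position [5].
assert make_cache_position(5, 1).tolist() == [5]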
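
The last hunk encodes the pattern behind all the test changes: for stateful models the KV cache lives inside the decoder request, so there is no separate decoder_with_past submodel (it is None) and the calibration wrapper must attach to the decoder instead. A minimal sketch of that wiring, assuming a Whisper checkpoint and that InferRequestWrapper is importable as in the test file:

import numpy as np
from transformers import AutoProcessor
from optimum.intel import OVModelForSpeechSeq2Seq
from optimum.intel.openvino.quantization import InferRequestWrapper

model_id = "openai/whisper-tiny"  # any Whisper checkpoint; chosen here only for illustration
processor = AutoProcessor.from_pretrained(model_id)
ov_model = OVModelForSpeechSeq2Seq.from_pretrained(model_id, export=True)

calibration_data = []
# Stateful models fold past key/values into the decoder request, so
# decoder_with_past is None and the wrapper must target the decoder instead.
target = ov_model.decoder if ov_model.stateful else ov_model.decoder_with_past
target.request = InferRequestWrapper(target.request, calibration_data, apply_caching=True)

# One second of silence stands in for real audio while capturing calibration inputs.
audio = np.zeros(16000, dtype=np.float32)
input_features = processor(audio, sampling_rate=16000, return_tensors="pt").input_features
ov_model.generate(input_features, max_new_tokens=10)
# calibration_data now holds the inputs captured from the wrapped request.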