diff --git a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py
index 7c3925b30293ba..7cc27a34554324 100644
--- a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py
+++ b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py
@@ -23,7 +23,6 @@
 
 from transformers import DonutProcessor, NougatProcessor, TrOCRProcessor
 from transformers.testing_utils import (
-    is_flaky,
     require_levenshtein,
     require_nltk,
     require_sentencepiece,
@@ -286,6 +285,8 @@ def check_encoder_decoder_model_generate(self, config, decoder_config, pixel_val
             enc_dec_model.config.eos_token_id = None
         if hasattr(enc_dec_model.config, "decoder") and hasattr(enc_dec_model.config.decoder, "eos_token_id"):
             enc_dec_model.config.decoder.eos_token_id = None
+        if hasattr(enc_dec_model.generation_config, "eos_token_id"):
+            enc_dec_model.generation_config.eos_token_id = None
 
         enc_dec_model.to(torch_device)
         inputs = pixel_values
@@ -324,10 +325,6 @@ def test_encoder_decoder_model_output_attentions(self):
         input_ids_dict = self.prepare_config_and_inputs()
         self.check_encoder_decoder_model_output_attentions(**input_ids_dict)
 
-    # FIXME @gante: flaky test
-    @is_flaky(
-        description="Fails on distributed runs e.g.: https://app.circleci.com/pipelines/github/huggingface/transformers/83611/workflows/666b01c9-1be8-4daa-b85d-189e670fc168/jobs/1078635/tests#failed-test-0"
-    )
    def test_encoder_decoder_model_generate(self):
         input_ids_dict = self.prepare_config_and_inputs()
         self.check_encoder_decoder_model_generate(**input_ids_dict)
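
For context on the added lines: `generate()` resolves its stopping criteria from the model's `generation_config`, so clearing `eos_token_id` only on `config` and `config.decoder` can still let generation stop early and produce sequences shorter than `max_length`, which appears to be the nondeterminism the removed `@is_flaky` decorator was papering over. A minimal sketch of the pattern, assuming only that `transformers.GenerationConfig` is available (the model and test harness are omitted; the values below are illustrative, not from the patch):

```python
from transformers import GenerationConfig

# Illustrative values: with an eos_token_id set, generate() may stop before max_length.
generation_config = GenerationConfig(eos_token_id=2, max_length=10)

# Mirror the patch: drop the EOS token so generation cannot stop early
# and the output length becomes deterministic for the test's shape checks.
if hasattr(generation_config, "eos_token_id"):
    generation_config.eos_token_id = None

assert generation_config.eos_token_id is None
```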