Skip to content

Commit

Permalink
skip failures (#39)
Browse files Browse the repository at this point in the history
* skip failures

* navi31 skip

* mi300 skips

* conversational test backwards compatability

* mi300 skips
  • Loading branch information
Cemberk authored and github-actions[bot] committed Sep 19, 2024
1 parent 2f26351 commit 9929ac0
Show file tree
Hide file tree
Showing 22 changed files with 169 additions and 11 deletions.
5 changes: 5 additions & 0 deletions tests/extended/test_trainer_ext.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@
require_torch_non_multi_accelerator,
slow,
torch_device,
skipIfRocm
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
Expand Down Expand Up @@ -98,16 +99,19 @@ def test_run_seq2seq_no_dist(self):

# verify that the trainer can handle non-distributed with n_gpu > 1
@require_torch_multi_accelerator
@skipIfRocm
def test_run_seq2seq_dp(self):
self.run_seq2seq_quick(distributed=False)

# verify that the trainer can handle distributed with n_gpu > 1
@require_torch_multi_accelerator
@skipIfRocm
def test_run_seq2seq_ddp(self):
self.run_seq2seq_quick(distributed=True)

@require_apex
@require_torch_gpu
@skipIfRocm
def test_run_seq2seq_apex(self):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
Expand All @@ -124,6 +128,7 @@ def test_run_seq2seq_apex(self):

@parameterized.expand(["base", "low", "high", "mixed"])
@require_torch_multi_accelerator
@skipIfRocm
def test_trainer_log_level_replica(self, experiment_id):
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
experiments = {
Expand Down
2 changes: 2 additions & 0 deletions tests/fsdp/test_fsdp.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@
require_torch_multi_accelerator,
slow,
torch_device,
skipIfRocm
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import FSDPOption, set_seed
Expand Down Expand Up @@ -176,6 +177,7 @@ def test_fsdp_config(self, sharding_strategy, dtype):
self.assertEqual(os.environ.get("ACCELERATE_USE_FSDP", "false"), "true")

@parameterized.expand(params, name_func=_parameterized_custom_name_func)
@skipIfRocm
def test_fsdp_config_transformers_auto_wrap(self, sharding_strategy, dtype):
output_dir = self.get_auto_remove_tmp_dir()
fsdp_config = deepcopy(self.fsdp_config)
Expand Down
2 changes: 2 additions & 0 deletions tests/generation/test_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@
require_torch_multi_gpu,
slow,
torch_device,
skipIfRocm
)

from ..test_modeling_common import floats_tensor, ids_tensor
Expand Down Expand Up @@ -666,6 +667,7 @@ def test_beam_search_generate_dict_outputs_use_cache(self):

@require_accelerate
@require_torch_multi_accelerator
@skipIfRocm
def test_model_parallel_beam_search(self):
for model_class in self.all_generative_model_classes:
if "xpu" in torch_device:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
require_torch_fp16,
slow,
torch_device,
skipIfRocm
)

from ...generation.test_utils import GenerationTesterMixin
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
import unittest

from transformers import is_torch_available, logging
from transformers.testing_utils import CaptureLogger, require_deterministic_for_xpu, require_torch, slow, torch_device
from transformers.testing_utils import CaptureLogger, require_deterministic_for_xpu, require_torch, slow, torch_device, skipIfRocm

from ...test_modeling_common import ids_tensor
from ..bart.test_modeling_bart import BartStandaloneDecoderModelTester
Expand Down Expand Up @@ -585,6 +585,7 @@ def test_encoder_decoder_model_from_pretrained_using_model_paths(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_encoder_decoder_model_from_pretrained_using_model_paths(**input_ids_dict, return_dict=False)

@skipIfRocm
def test_save_and_load_from_pretrained(self):
input_ids_dict = self.prepare_config_and_inputs()
self.check_save_and_load(**input_ids_dict)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
FastSpeech2ConformerWithHifiGanConfig,
is_torch_available,
)
from transformers.testing_utils import require_g2p_en, require_torch, require_torch_accelerator, slow, torch_device
from transformers.testing_utils import require_g2p_en, require_torch, require_torch_accelerator, slow, torch_device, skipIfRocm

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor
Expand Down Expand Up @@ -127,6 +127,14 @@ class FastSpeech2ConformerModelTest(ModelTesterMixin, unittest.TestCase):
test_resize_embeddings = False
is_encoder_decoder = True

@skipIfRocm
def test_model_outputs_equivalence(self):
super().test_model_outputs_equivalence()

@skipIfRocm
def test_multi_gpu_data_parallel_forward(self):
super().test_multi_gpu_data_parallel_forward()

def setUp(self):
self.model_tester = FastSpeech2ConformerModelTester(self)
self.config_tester = ConfigTester(self, config_class=FastSpeech2ConformerConfig)
Expand Down Expand Up @@ -550,6 +558,10 @@ class FastSpeech2ConformerWithHifiGanTest(ModelTesterMixin, unittest.TestCase):
test_resize_embeddings = False
is_encoder_decoder = True

@skipIfRocm
def test_multi_gpu_data_parallel_forward(self):
super().test_multi_gpu_data_parallel_forward()

def setUp(self):
self.model_tester = FastSpeech2ConformerWithHifiGanTester(self)

Expand Down
7 changes: 6 additions & 1 deletion tests/models/flava/test_modeling_flava.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@
FlavaMultimodalConfig,
FlavaTextConfig,
)
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.testing_utils import require_torch, require_vision, slow, torch_device, skipIfRocm
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
Expand Down Expand Up @@ -597,6 +597,11 @@ class FlavaMultimodalModelTest(ModelTesterMixin, unittest.TestCase):
test_resize_embeddings = False
test_torchscript = False


@skipIfRocm
def test_batching_equivalence(self):
super().test_batching_equivalence()

def setUp(self):
self.model_tester = FlavaMultimodalModelTester(self)
self.config_tester = ConfigTester(
Expand Down
3 changes: 2 additions & 1 deletion tests/models/informer/test_modeling_informer.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
from huggingface_hub import hf_hub_download

from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device, skipIfRocm

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
Expand Down Expand Up @@ -221,6 +221,7 @@ def test_save_load_strict(self):
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])

@skipIfRocm
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
Expand Down
6 changes: 5 additions & 1 deletion tests/models/layoutlmv2/test_modeling_layoutlmv2.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@

import unittest

from transformers.testing_utils import require_detectron2, require_torch, require_torch_multi_gpu, slow, torch_device
from transformers.testing_utils import require_detectron2, require_torch, require_torch_multi_gpu, slow, torch_device, skipIfRocm
from transformers.utils import is_detectron2_available, is_torch_available

from ...test_configuration_common import ConfigTester
Expand Down Expand Up @@ -274,6 +274,10 @@ class LayoutLMv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa
else {}
)

@skipIfRocm
def test_inputs_embeds_matches_input_ids(self):
super().test_inputs_embeds_matches_input_ids()

def setUp(self):
self.model_tester = LayoutLMv2ModelTester(self)
self.config_tester = ConfigTester(self, config_class=LayoutLMv2Config, hidden_size=37)
Expand Down
4 changes: 3 additions & 1 deletion tests/models/mamba/test_modeling_mamba.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
from parameterized import parameterized

from transformers import AutoTokenizer, MambaConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device
from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device, skipIfRocm

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
Expand Down Expand Up @@ -305,6 +305,7 @@ def test_config(self):
self.config_tester.run_common_tests()

@require_torch_multi_gpu
@skipIfRocm
def test_multi_gpu_data_parallel_forward(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

Expand Down Expand Up @@ -441,6 +442,7 @@ def setUp(self):
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)

@parameterized.expand([(torch_device,), ("cpu",)])
@skipIfRocm
def test_simple_generate(self, device):
tokenizer = AutoTokenizer.from_pretrained("state-spaces/mamba-130m-hf")
tokenizer.pad_token = tokenizer.eos_token
Expand Down
69 changes: 67 additions & 2 deletions tests/models/mra/test_modeling_mra.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
import unittest

from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.testing_utils import require_torch, slow, torch_device, skipIfRocm

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
Expand Down Expand Up @@ -314,36 +314,101 @@ class MraModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
def setUp(self):
self.model_tester = MraModelTester(self)
self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

def test_config(self):
self.config_tester.run_common_tests()


@skipIfRocm
def test_determinism(self):
super().test_determinism()

@skipIfRocm
def test_feed_forward_chunking(self):
super().test_feed_forward_chunking()

@skipIfRocm
def test_hidden_states_output(self):
super().test_hidden_states_output()

@skipIfRocm
def test_inputs_embeds(self):
super().test_inputs_embeds()

@skipIfRocm
def test_inputs_embeds_matches_input_ids(self):
super().test_inputs_embeds_matches_input_ids()

@skipIfRocm
def test_load_with_mismatched_shapes(self):
super().test_load_with_mismatched_shapes()

@skipIfRocm
def test_model_outputs_equivalence(self):
super().test_model_outputs_equivalence()

@skipIfRocm
def test_multi_gpu_data_parallel_forward(self):
super().test_multi_gpu_data_parallel_forward()

@skipIfRocm
def test_problem_types(self):
super().test_problem_types()

@skipIfRocm
def test_resize_embeddings_untied(self):
super().test_resize_embeddings_untied()

@skipIfRocm
def test_resize_tokens_embeddings(self):
super().test_resize_tokens_embeddings()

@skipIfRocm
def test_retain_grad_hidden_states_attentions(self):
super().test_retain_grad_hidden_states_attentions()

@skipIfRocm
def test_save_load(self):
super().test_save_load()

@skipIfRocm
def test_training(self):
super().test_training()


@skipIfRocm
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)

@skipIfRocm
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)

@skipIfRocm
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

@skipIfRocm
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

@skipIfRocm
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

@skipIfRocm
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

@skipIfRocm
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
Expand Down
9 changes: 9 additions & 0 deletions tests/models/musicgen/test_modeling_musicgen.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@
require_torch_sdpa,
slow,
torch_device,
skipIfRocm
)
from transformers.utils import cached_property, is_torch_bf16_available_on_device, is_torch_fp16_available_on_device

Expand Down Expand Up @@ -184,6 +185,10 @@ class MusicgenDecoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTeste
test_pruning = False
test_resize_embeddings = False

@skipIfRocm
def test_multi_gpu_data_parallel_forward(self):
super().test_multi_gpu_data_parallel_forward()

def setUp(self):
self.model_tester = MusicgenDecoderTester(self)
self.config_tester = ConfigTester(self, config_class=MusicgenDecoderConfig, hidden_size=16)
Expand Down Expand Up @@ -1071,6 +1076,10 @@ class MusicgenTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
# (and `torchscript` hates `None` values).
test_torchscript = False

@skipIfRocm
def test_multi_gpu_data_parallel_forward(self):
super().test_multi_gpu_data_parallel_forward()

def setUp(self):
self.model_tester = MusicgenTester(self)

Expand Down
9 changes: 9 additions & 0 deletions tests/models/musicgen_melody/test_modeling_musicgen_melody.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@
require_torchaudio,
slow,
torch_device,
skipIfRocm
)
from transformers.utils import cached_property, is_torch_bf16_available_on_device, is_torch_fp16_available_on_device

Expand Down Expand Up @@ -184,6 +185,10 @@ class MusicgenMelodyDecoderTest(ModelTesterMixin, GenerationTesterMixin, unittes
) # the model uses a custom generation method so we only run a specific subset of the generation tests
test_pruning = False
test_resize_embeddings = False

@skipIfRocm
def test_multi_gpu_data_parallel_forward(self):
super().test_multi_gpu_data_parallel_forward()

def setUp(self):
self.model_tester = MusicgenMelodyDecoderTester(self)
Expand Down Expand Up @@ -1075,6 +1080,10 @@ class MusicgenMelodyTest(ModelTesterMixin, GenerationTesterMixin, PipelineTester
# (and `torchscript` hates `None` values).
test_torchscript = False

@skipIfRocm
def test_multi_gpu_data_parallel_forward(self):
super().test_multi_gpu_data_parallel_forward()

def setUp(self):
self.model_tester = MusicgenMelodyTester(self)

Expand Down
Loading

0 comments on commit 9929ac0

Please sign in to comment.