From 23ddfa5300547065c3c62997813747b9f0c3d49a Mon Sep 17 00:00:00 2001 From: "[[ -z $EMAIL ]] && read -e -p \"Enter your email (for git configuration): \" EMAIL" Date: Tue, 9 Jan 2024 14:50:46 -0500 Subject: [PATCH 1/2] Fix test --- tests/trainer/test_trainer.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 0ab0b781126da0..c2dc8c2dfb0b5f 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -77,6 +77,7 @@ require_torch_up_to_2_accelerators, require_torchdynamo, require_wandb, + skip, slow, torch_device, ) @@ -1458,6 +1459,7 @@ def test_can_resume_training(self): trainer.train(resume_from_checkpoint=True) self.assertTrue("No valid checkpoint found in output directory" in str(context.exception)) + @skip("@muellerzr: Fix once Trainer can take an accelerate configuration. Need to set `seedable_sampler=True`.") def test_resume_training_with_randomness(self): # For more than 1 GPUs, since the randomness is introduced in the model and with DataParallel (which is used # in this test for more than 2 GPUs), the calls to the torch RNG will happen in a random order (sometimes From a0dac66d178e2fd0599edc49c8c7cc3e48521aa3 Mon Sep 17 00:00:00 2001 From: "[[ -z $EMAIL ]] && read -e -p \"Enter your email (for git configuration): \" EMAIL" Date: Tue, 9 Jan 2024 15:12:47 -0500 Subject: [PATCH 2/2] Skip --- tests/trainer/test_trainer.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index c2dc8c2dfb0b5f..813312c53338ac 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -77,7 +77,6 @@ require_torch_up_to_2_accelerators, require_torchdynamo, require_wandb, - skip, slow, torch_device, ) @@ -1459,7 +1458,9 @@ def test_can_resume_training(self): trainer.train(resume_from_checkpoint=True) self.assertTrue("No valid checkpoint found in output directory" in str(context.exception)) - @skip("@muellerzr: Fix once Trainer can take an accelerate configuration. Need to set `seedable_sampler=True`.") + @unittest.skip( + reason="@muellerzr: Fix once Trainer can take an accelerate configuration. Need to set `seedable_sampler=True`." + ) def test_resume_training_with_randomness(self): # For more than 1 GPUs, since the randomness is introduced in the model and with DataParallel (which is used # in this test for more than 2 GPUs), the calls to the torch RNG will happen in a random order (sometimes