From 4aadd0b0af67dd5c8faa3987c88dc1419d1cbcb3 Mon Sep 17 00:00:00 2001
From: Dmitry Rogozhkin
Date: Fri, 13 Dec 2024 12:20:42 -0800
Subject: [PATCH] ci: mark model_parallel tests as cuda specific

The `parallelize()` API is deprecated in favor of accelerate's
`device_map="auto"` and therefore is not accepting new features. At the
same time, the `parallelize()` implementation is currently CUDA-specific.
This commit marks the respective CI tests with `@require_torch_gpu`.

Fixes: #35252

Signed-off-by: Dmitry Rogozhkin
---
 tests/test_modeling_common.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py
index 13eacc4a596562..4315e67835c209 100755
--- a/tests/test_modeling_common.py
+++ b/tests/test_modeling_common.py
@@ -3046,6 +3046,7 @@ def test_multi_gpu_data_parallel_forward(self):
             with torch.no_grad():
                 _ = model(**self._prepare_for_class(inputs_dict, model_class))
 
+    @require_torch_gpu
     @require_torch_multi_gpu
     def test_model_parallelization(self):
         if not self.test_model_parallel:
@@ -3108,6 +3109,7 @@ def get_current_gpu_memory_use():
         gc.collect()
         torch.cuda.empty_cache()
 
+    @require_torch_gpu
     @require_torch_multi_gpu
     def test_model_parallel_equal_results(self):
         if not self.test_model_parallel:
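
For context, the following is a minimal sketch (not part of the patch) of the accelerate-based loading path that supersedes the deprecated `parallelize()` API referenced in the commit message. The "gpt2" checkpoint, prompt, and generation settings are illustrative placeholders only.

# Sketch: load a model with accelerate's device_map="auto" instead of
# calling the deprecated, CUDA-only parallelize() API.
# "gpt2" is a placeholder checkpoint chosen for illustration.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# device_map="auto" lets accelerate place model shards on whatever
# accelerators are available, which is why parallelize() no longer
# receives new features.
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=5)
print(tokenizer.decode(outputs[0]))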