Skip failing test_save_load_low_cpu_mem_usage tests #29110

Closed
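This PR skips test_save_load_low_cpu_mem_usage for models that currently fail it, recording the failure reason in each skip decorator. For context, the skipped test comes from the shared model tester mixin (ModelTesterMixin) and round-trips a model through save_pretrained / from_pretrained with low_cpu_mem_usage=True. The sketch below is an illustrative approximation of that pattern, not the mixin's exact code; the tiny BertConfig and the single input are made-up values, and low_cpu_mem_usage loading may require the accelerate package.

import tempfile

import torch
from transformers import BertConfig, BertModel

# Tiny, randomly initialized model standing in for the per-model testers.
config = BertConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37,
)
model = BertModel(config)
model.eval()

with tempfile.TemporaryDirectory() as tmp_dir:
    model.save_pretrained(tmp_dir)
    # low_cpu_mem_usage=True builds the model skeleton with parameters on the
    # "meta" device and then materializes them straight from the checkpoint,
    # instead of first allocating and randomly initializing them on CPU.
    reloaded = BertModel.from_pretrained(tmp_dir, low_cpu_mem_usage=True)

# The reloaded model should reproduce the original outputs exactly.
input_ids = torch.tensor([[1, 2, 3, 4, 5]])
with torch.no_grad():
    assert torch.allclose(model(input_ids)[0], reloaded(input_ids)[0])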
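Most of the skip reasons below cite the same NotImplementedError. That is the standard PyTorch error raised when code tries to read or copy data out of a tensor on the "meta" device, which stores only shape and dtype. The minimal, transformers-independent snippet below reproduces the error; the exact failing call sites inside the affected models are not shown here.

import torch

# A meta tensor has shape and dtype but no underlying storage.
weight = torch.empty(4, 4, device="meta")

try:
    # Materializing (copying) its data is impossible, so PyTorch raises.
    weight.to("cpu")
except NotImplementedError as err:
    print(err)  # e.g. "Cannot copy out of meta tensor; no data!"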
tests/models/albert/test_modeling_albert.py (6 additions, 0 deletions)

@@ -326,6 +326,12 @@ def test_model_from_pretrained(self):
             model = AlbertModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
 class AlbertModelIntegrationTest(unittest.TestCase):
tests/models/deberta/test_modeling_deberta.py (6 additions, 0 deletions)

@@ -278,6 +278,12 @@ def test_model_from_pretrained(self):
             model = DebertaModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
 @require_sentencepiece
tests/models/deberta_v2/test_modeling_deberta_v2.py (6 additions, 0 deletions)

@@ -296,6 +296,12 @@ def test_model_from_pretrained(self):
             model = DebertaV2Model.from_pretrained(model_name)
             self.assertIsNotNone(model)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
 @require_sentencepiece
tests/models/encodec/test_modeling_encodec.py (6 additions, 0 deletions)

@@ -420,6 +420,12 @@ def test_identity_shortcut(self):
         config.use_conv_shortcut = False
         self.model_tester.create_and_check_model_forward(config, inputs_dict)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - TypeError: _weight_norm_interface() missing 1 required positional argument: 'dim'"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 def normalize(arr):
     norm = np.linalg.norm(arr)
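The EnCodec skip reason differs from the others: its convolutions are weight-normalized, so the effective weight is recomputed from weight_g / weight_v on each forward pass, and this re-parametrization machinery appears to be what fails on the meta-device loading path (hence the _weight_norm_interface error). Below is a small sketch of the older weight_norm API that EnCodec-style convolutions use; it is illustrative only and does not invoke the failing internal call directly.

import torch
from torch import nn

# Weight normalization splits a conv's weight into a direction (weight_v)
# and a magnitude (weight_g); the usable weight is rebuilt in a forward
# pre-hook every time the module runs.
conv = nn.utils.weight_norm(nn.Conv1d(2, 4, kernel_size=3))
print(sorted(name for name, _ in conv.named_parameters()))
# ['bias', 'weight_g', 'weight_v']

# The recomputed weight behaves like a plain convolution's weight.
out = conv(torch.randn(1, 2, 16))
print(out.shape)  # torch.Size([1, 4, 14])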
tests/models/flava/test_modeling_flava.py (6 additions, 0 deletions)

@@ -1256,6 +1256,12 @@ def test_training_gradient_checkpointing_use_reentrant(self):
     def test_training_gradient_checkpointing_use_reentrant_false(self):
         pass
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 # We will verify our results on an image of cute cats
 def prepare_img():
tests/models/fnet/test_modeling_fnet.py (6 additions, 0 deletions)

@@ -468,6 +468,12 @@ def test_model_from_pretrained(self):
             model = FNetModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
 class FNetModelIntegrationTest(unittest.TestCase):
tests/models/ibert/test_modeling_ibert.py (8 additions, 2 deletions)

@@ -380,8 +380,14 @@ def test_inputs_embeds(self):
                 inputs["inputs_embeds"] = wte(encoder_input_ids)
                 inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
 
-                with torch.no_grad():
-                    model(**inputs)[0]
+            with torch.no_grad():
+                model(**inputs)[0]
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
tests/models/lxmert/test_modeling_lxmert.py (6 additions, 0 deletions)

@@ -767,6 +767,12 @@ def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict):
 
         return tf_inputs_dict
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
 class LxmertModelIntegrationTest(unittest.TestCase):
tests/models/mobilebert/test_modeling_mobilebert.py (6 additions, 0 deletions)

@@ -341,6 +341,12 @@ def test_for_token_classification(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 def _long_tensor(tok_lst):
     return torch.tensor(
tests/models/realm/test_modeling_realm.py (6 additions, 0 deletions)

@@ -441,6 +441,12 @@ def test_scorer_from_pretrained(self):
         model = RealmScorer.from_pretrained("google/realm-cc-news-pretrained-scorer")
         self.assertIsNotNone(model)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
 class RealmModelIntegrationTest(unittest.TestCase):
tests/models/roformer/test_modeling_roformer.py (6 additions, 0 deletions)

@@ -504,6 +504,12 @@ def test_training_gradient_checkpointing_use_reentrant(self):
     def test_training_gradient_checkpointing_use_reentrant_false(self):
         pass
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_torch
 class RoFormerModelIntegrationTest(unittest.TestCase):
tests/models/squeezebert/test_modeling_squeezebert.py (6 additions, 0 deletions)

@@ -281,6 +281,12 @@ def test_model_from_pretrained(self):
             model = SqueezeBertModel.from_pretrained(model_name)
             self.assertIsNotNone(model)
 
+    @unittest.skip(
+        reason="Does not currently support low_cpu_mem_usage - NotImplementedError: Cannot copy out of meta tensor; no data!"
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass
+
 
 @require_sentencepiece
 @require_tokenizers
tests/models/timm_backbone/test_modeling_timm_backbone.py (6 additions, 0 deletions)

@@ -273,3 +273,9 @@ def test_create_from_modified_config(self):
         model.to(torch_device)
         model.eval()
         result = model(**inputs_dict)
+
+    @unittest.skip(
+        reason="Does not support low_cpu_mem_usage - weights are loaded from timm and not from transformers checkpoint."
+    )
+    def test_save_load_low_cpu_mem_usage(self):
+        pass