diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py
index 33c5fea7e322ae..211c87a834ee19 100644
--- a/src/transformers/modeling_utils.py
+++ b/src/transformers/modeling_utils.py
@@ -2985,6 +2985,18 @@ def from_pretrained(
                 **kwargs,
             )
         else:
+            # In case one passes a config to `from_pretrained` together with "attn_implementation",
+            # override the config's `_attn_implementation` attribute with the `attn_implementation` from the kwargs.
+            # Please see: https://github.com/huggingface/transformers/issues/28038
+
+            # Overwrite `config._attn_implementation` with the one from the kwargs --> the auto-factory
+            # pops `attn_implementation` from the kwargs, but this handles the case where users
+            # manually pass the config to `from_pretrained`.
+            config = copy.deepcopy(config)
+
+            kwarg_attn_imp = kwargs.pop("attn_implementation", None)
+            if kwarg_attn_imp is not None and config._attn_implementation != kwarg_attn_imp:
+                config._attn_implementation = kwarg_attn_imp
             model_kwargs = kwargs
 
         quantizer = None
diff --git a/tests/test_modeling_utils.py b/tests/test_modeling_utils.py
index ddfaad5214dc50..a8a483b4017c84 100755
--- a/tests/test_modeling_utils.py
+++ b/tests/test_modeling_utils.py
@@ -1823,6 +1823,16 @@ def test_error_no_flash_available(self):
 
         self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception))
 
+    def test_error_no_flash_available_with_config(self):
+        with self.assertRaises(ValueError) as cm:
+            config = AutoConfig.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel")
+
+            _ = AutoModel.from_pretrained(
+                "hf-tiny-model-private/tiny-random-MCTCTModel", config=config, attn_implementation="flash_attention_2"
+            )
+
+        self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception))
+
     def test_error_wrong_attn_implementation(self):
         with self.assertRaises(ValueError) as cm:
             _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="foo")
@@ -1840,6 +1850,21 @@ def test_not_available_flash(self):
 
         self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception))
 
+    def test_not_available_flash_with_config(self):
+        if is_flash_attn_2_available():
+            self.skipTest("Please uninstall flash-attn package to run test_not_available_flash")
+
+        config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-GPTBigCodeModel")
+
+        with self.assertRaises(ImportError) as cm:
+            _ = AutoModel.from_pretrained(
+                "hf-internal-testing/tiny-random-GPTBigCodeModel",
+                config=config,
+                attn_implementation="flash_attention_2",
+            )
+
+        self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception))
+
     def test_not_available_sdpa(self):
         if is_torch_sdpa_available():
             self.skipTest("This test requires torch<=2.0")
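
For reference, a minimal usage sketch of the behavior this patch addresses. The checkpoint name ("gpt2") and the requested backend ("sdpa") below are illustrative stand-ins, not taken from the patch; substitute any checkpoint whose architecture supports the requested attention implementation. The point is that the `attn_implementation` kwarg now takes effect even when a config object is passed explicitly, because it is written onto `_attn_implementation` of a deep copy of that config:

    from transformers import AutoConfig, AutoModelForCausalLM

    # Illustrative checkpoint -- substitute any model whose architecture supports
    # the attention backend requested below.
    config = AutoConfig.from_pretrained("gpt2")

    # Per https://github.com/huggingface/transformers/issues/28038, passing a config
    # object explicitly used to prevent the kwarg from being applied; with this patch
    # the kwarg overrides `_attn_implementation` on a copy of the config.
    model = AutoModelForCausalLM.from_pretrained(
        "gpt2",
        config=config,
        attn_implementation="sdpa",
    )

    print(model.config._attn_implementation)  # -> "sdpa"

The two new tests above exercise the same code path through the error branches: the override must still raise the usual ValueError/ImportError for unsupported or uninstalled Flash Attention 2 when the config is passed manually.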