diff --git a/src/transformers/models/mpt/modeling_mpt.py b/src/transformers/models/mpt/modeling_mpt.py
index a79e952aaf6779..74f214c4fcab75 100644
--- a/src/transformers/models/mpt/modeling_mpt.py
+++ b/src/transformers/models/mpt/modeling_mpt.py
@@ -70,10 +70,10 @@ def build_mpt_alibi_tensor(num_heads, sequence_length, alibi_bias_max=8, device=
     base = base * (alibi_bias_max / num_heads_power_of_2)
 
     slopes = 1.0 / torch.pow(2, base)
-    slopes = slopes.view(1, num_heads, 1, 1)
+    slopes = slopes.view(1, num_heads_power_of_2, 1, 1)
 
     if num_heads_power_of_2 != num_heads:
-        slopes = torch.concat([slopes[1::2], slopes[::2]])[:num_heads]
+        slopes = torch.concat([slopes[:, 1::2, ...], slopes[:, ::2, ...]], dim=1)[:, :num_heads, ...]
 
     alibi = alibi * slopes
     return alibi.squeeze(0)
diff --git a/tests/models/mpt/test_modeling_mpt.py b/tests/models/mpt/test_modeling_mpt.py
index c2d3ae0d0111e9..e70b344d8c95a7 100644
--- a/tests/models/mpt/test_modeling_mpt.py
+++ b/tests/models/mpt/test_modeling_mpt.py
@@ -53,7 +53,7 @@ def __init__(
         use_labels=True,
         use_mc_token_ids=True,
         vocab_size=99,
-        hidden_size=32,
+        hidden_size=48,
         num_hidden_layers=2,
         num_attention_heads=4,
         intermediate_size=37,
@@ -385,6 +385,12 @@ def test_mpt_model(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_mpt_model(*config_and_inputs)
 
+    def test_mpt_model_alibi_tensor(self):
+        # test creation of alibi tensor when num heads is not a power of two
+        config_and_inputs = self.model_tester.prepare_config_and_inputs()
+        config_and_inputs[0].n_heads = 6
+        self.model_tester.create_and_check_mpt_model(*config_and_inputs)
+
     def test_mpt_model_past(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_mpt_model_past(*config_and_inputs)
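
The standalone sketch below (not part of the patch) walks through the slope construction that the first hunk fixes, using a head count of 6. The definitions of `num_heads_power_of_2` and `base` are reconstructed from the hunk context and may differ slightly from the actual file; the two patched lines are copied from the diff.

import math

import torch

num_heads = 6  # not a power of two
alibi_bias_max = 8

# Assumed setup, mirroring the hunk context: pad the head count up to the next power of two.
num_heads_power_of_2 = 2 ** math.ceil(math.log2(num_heads))  # -> 8
base = torch.arange(1, num_heads_power_of_2 + 1).float()
base = base * (alibi_bias_max / num_heads_power_of_2)

slopes = 1.0 / torch.pow(2, base)
# Patched line: the tensor holds num_heads_power_of_2 (= 8) slopes, so viewing it as
# (1, num_heads, 1, 1) with num_heads = 6 raised a shape error before the fix.
slopes = slopes.view(1, num_heads_power_of_2, 1, 1)

if num_heads_power_of_2 != num_heads:
    # Patched line: the old slicing slopes[1::2] / slopes[::2] operated on dim 0 (size 1)
    # and never touched the head dimension; the fix interleaves and truncates along dim=1.
    slopes = torch.concat([slopes[:, 1::2, ...], slopes[:, ::2, ...]], dim=1)[:, :num_heads, ...]

print(slopes.shape)  # torch.Size([1, 6, 1, 1]) -- one slope per head

The new test exercises the same case end to end by setting `n_heads = 6` on the model config, with `hidden_size` bumped to 48 so it stays divisible by the head count.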