diff --git a/src/transformers/models/lxmert/modeling_lxmert.py b/src/transformers/models/lxmert/modeling_lxmert.py
index 1256c98b5de5d6..8d22daafa7dd8b 100644
--- a/src/transformers/models/lxmert/modeling_lxmert.py
+++ b/src/transformers/models/lxmert/modeling_lxmert.py
@@ -705,7 +705,7 @@ def __init__(self, config, lxmert_model_embedding_weights):
         self.decoder.weight = lxmert_model_embedding_weights
         self.bias = nn.Parameter(torch.zeros(lxmert_model_embedding_weights.size(0)))
 
-    def maybe_resize_bias(self, new_size: int):
+    def _maybe_resize_bias(self, new_size: int):
         if new_size != self.bias.shape[0]:
             self.bias.data = nn.functional.pad(self.bias.data, (0, new_size - self.bias.shape[0]), "constant", 0)
 
@@ -1082,7 +1082,7 @@ def __init__(self, config):
 
     def _tie_weights(self):
         self.cls.predictions.decoder.weight = self.lxmert.embeddings.word_embeddings.weight
-        self.cls.predictions.maybe_resize_bias(self.lxmert.embeddings.word_embeddings.weight.shape[0])
+        self.cls.predictions._maybe_resize_bias(self.lxmert.embeddings.word_embeddings.weight.shape[0])
 
     def resize_num_qa_labels(self, num_labels):
         """
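
The renamed _maybe_resize_bias helper pads the prediction-head bias so its length matches the (possibly resized) tied word-embedding matrix. A minimal standalone sketch of that padding logic follows; the vocabulary and hidden sizes are hypothetical and it is not part of the diff above.

import torch
from torch import nn

old_vocab_size, new_vocab_size, hidden_size = 30522, 30525, 768  # hypothetical sizes

embedding = nn.Embedding(new_vocab_size, hidden_size)  # embeddings after a resize
bias = nn.Parameter(torch.zeros(old_vocab_size))       # head bias still at the old size

# Same idea as _maybe_resize_bias: right-pad the bias with zeros so its length
# matches the first dimension of the tied embedding weight.
if bias.shape[0] != embedding.weight.shape[0]:
    bias.data = nn.functional.pad(
        bias.data, (0, embedding.weight.shape[0] - bias.shape[0]), "constant", 0
    )

assert bias.shape[0] == embedding.weight.shape[0]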