From d08855c2e7e49878f8f80af9c590778f77dbe651 Mon Sep 17 00:00:00 2001
From: Phillip Kuznetsov
Date: Tue, 29 Oct 2024 13:21:14 -0700
Subject: [PATCH] cleanup(Mask2Former): Remove level_start_index parameter in
 Mask2Former

Signed-off-by: Phillip Kuznetsov
---
 .../models/mask2former/modeling_mask2former.py | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/src/transformers/models/mask2former/modeling_mask2former.py b/src/transformers/models/mask2former/modeling_mask2former.py
index f4aea415adf5e6..614bbc825219f2 100644
--- a/src/transformers/models/mask2former/modeling_mask2former.py
+++ b/src/transformers/models/mask2former/modeling_mask2former.py
@@ -927,7 +927,6 @@ def forward(
         position_embeddings: Optional[torch.Tensor] = None,
         reference_points=None,
         spatial_shapes=None,
-        level_start_index=None,
         output_attentions: bool = False,
     ):
         # add position embeddings to the hidden states before projecting to queries and keys
@@ -1002,7 +1001,6 @@ def forward(
         position_embeddings: torch.Tensor = None,
         reference_points=None,
         spatial_shapes=None,
-        level_start_index=None,
         output_attentions: bool = False,
     ):
         """
@@ -1017,8 +1015,6 @@ def forward(
                 Reference points.
             spatial_shapes (`torch.LongTensor`, *optional*):
                 Spatial shapes of the backbone feature maps.
-            level_start_index (`torch.LongTensor`, *optional*):
-                Level start index.
             output_attentions (`bool`, *optional*):
                 Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                 returned tensors for more detail.
@@ -1034,7 +1030,6 @@ def forward(
             position_embeddings=position_embeddings,
             reference_points=reference_points,
             spatial_shapes=spatial_shapes,
-            level_start_index=level_start_index,
             output_attentions=output_attentions,
         )

@@ -1123,7 +1118,6 @@ def forward(
         attention_mask=None,
         position_embeddings=None,
         spatial_shapes=None,
-        level_start_index=None,
         valid_ratios=None,
         output_attentions=None,
         output_hidden_states=None,
@@ -1142,8 +1136,6 @@ def forward(
                 Position embeddings that are added to the queries and keys in each self-attention layer.
             spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
                 Spatial shapes of each feature map.
-            level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`):
-                Starting index of each feature map.
             valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
                 Ratio of valid area in each feature level.
             output_attentions (`bool`, *optional*):
@@ -1177,7 +1169,6 @@ def forward(
                 position_embeddings=position_embeddings,
                 reference_points=reference_points,
                 spatial_shapes=spatial_shapes,
-                level_start_index=level_start_index,
                 output_attentions=output_attentions,
             )

@@ -1321,7 +1312,6 @@ def forward(
             attention_mask=masks_flat,
             position_embeddings=level_pos_embed_flat,
             spatial_shapes=spatial_shapes,
-            level_start_index=level_start_index,
             valid_ratios=valid_ratios,
             output_attentions=output_attentions,
             output_hidden_states=output_hidden_states,
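
For context on why the argument is safe to drop: in the deformable-attention convention this model follows, `level_start_index` is fully derivable from `spatial_shapes` (each level's start offset in the flattened feature sequence is the running sum of the preceding levels' areas), so removing the pass-through loses no information. Below is a minimal standalone sketch of that relationship; the tensor values are hypothetical, and the snippet is an illustration, not part of the patch.

import torch

# Spatial shapes of the backbone feature maps, shape (num_feature_levels, 2);
# each row is (height, width). The values here are made up for illustration.
spatial_shapes = torch.tensor([[32, 32], [16, 16], [8, 8]], dtype=torch.long)

# Start offset of each level in the flattened sum(H*W) token sequence:
# zero for the first level, then the cumulative area of the preceding levels.
level_start_index = torch.cat(
    (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])
)

print(level_start_index)  # tensor([   0, 1024, 1280])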