diff --git a/src/transformers/models/kosmos2_5/modeling_kosmos2_5.py b/src/transformers/models/kosmos2_5/modeling_kosmos2_5.py
index 7f09fe60ce8160..21d5ddb47e832a 100644
--- a/src/transformers/models/kosmos2_5/modeling_kosmos2_5.py
+++ b/src/transformers/models/kosmos2_5/modeling_kosmos2_5.py
@@ -1496,7 +1496,9 @@ def forward(
         else:
             # add zero embedding for padding tokens
             bsz, seq_len, dim = positions.size()
-            zero_emb = self.segment_emb(torch.zeros((bsz, 1), dtype=torch.long, device=self.segment_emb.weight.device)).to(positions.device)
+            zero_emb = self.segment_emb(
+                torch.zeros((bsz, 1), dtype=torch.long, device=self.segment_emb.weight.device)
+            ).to(positions.device)
             positions += zero_emb
 
         hidden_states = inputs_embeds + positions
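
For context, a minimal standalone sketch of the broadcasting this hunk relies on: the (bsz, 1) zero-index lookup returns one segment embedding per batch element, which broadcasts across seq_len when added to the position embeddings. The shapes, the embedding table size, and the variable names below are illustrative assumptions, not the model's actual configuration.

import torch
from torch import nn

# Stand-ins for self.segment_emb and the position embeddings (assumed shapes).
bsz, seq_len, dim = 2, 5, 8
segment_emb = nn.Embedding(2, dim)
positions = torch.randn(bsz, seq_len, dim)

# Index 0 looks up the "padding" segment embedding; result has shape (bsz, 1, dim).
zero_emb = segment_emb(
    torch.zeros((bsz, 1), dtype=torch.long, device=segment_emb.weight.device)
).to(positions.device)

# (bsz, 1, dim) broadcasts over the sequence dimension, matching `positions += zero_emb`.
positions = positions + zero_emb
print(positions.shape)  # torch.Size([2, 5, 8])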