Llama family, fix use_cache=False generation (#30380)
* nit to make sure cache positions are not sliced

* fix other models

* nit

* style
ArthurZucker authored and Ita Zaporozhets committed May 14, 2024
1 parent c1c9acc commit 4aa72ca
Showing 4 changed files with 40 additions and 12 deletions.
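
The change targets generation with the key-value cache turned off. A minimal reproduction sketch (not part of the commit; the checkpoint name and prompt are illustrative assumptions) of the call path this fix affects:

# Sketch of the use_cache=False generation path this commit fixes (illustrative only).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Llama-2-7b-hf"  # assumed example; the Cohere, Gemma and OLMo ports are patched the same way
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16)

inputs = tokenizer("The capital of France is", return_tensors="pt")
# With use_cache=False, prepare_inputs_for_generation must keep the full-length
# cache_position instead of slicing it down to the newly generated tokens.
output = model.generate(**inputs, max_new_tokens=8, use_cache=False)
print(tokenizer.decode(output[0], skip_special_tokens=True))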
13 changes: 10 additions & 3 deletions src/transformers/models/cohere/modeling_cohere.py
@@ -1175,7 +1175,14 @@ def forward(
         )
 
     def prepare_inputs_for_generation(
-        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs
+        self,
+        input_ids,
+        past_key_values=None,
+        attention_mask=None,
+        inputs_embeds=None,
+        cache_position=None,
+        use_cache=True,
+        **kwargs,
     ):
         # With static cache, the `past_key_values` is None
         # TODO joao: standardize interface for the different Cache classes and remove of this if
@@ -1239,7 +1246,7 @@ def prepare_inputs_for_generation(
         input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
         if cache_position is None:
             cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
-        else:
+        elif use_cache:
             cache_position = cache_position[-input_length:]
 
         if has_static_cache:
@@ -1250,7 +1257,7 @@ def prepare_inputs_for_generation(
                 "position_ids": position_ids,
                 "cache_position": cache_position,
                 "past_key_values": past_key_values,
-                "use_cache": kwargs.get("use_cache"),
+                "use_cache": use_cache,
                 "attention_mask": attention_mask,
             }
         )
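
The behavioural change in the hunks above is the `elif use_cache:` guard: cache_position is only sliced down to the newly fed tokens when a cache is actually in use. A standalone sketch of that logic (plain PyTorch; the helper function is hypothetical, its names mirror the diff):

# Hypothetical helper isolating the cache_position handling changed above.
import torch

def resolve_cache_position(past_length, input_length, cache_position=None, use_cache=True):
    if cache_position is None:
        cache_position = torch.arange(past_length, past_length + input_length)
    elif use_cache:
        # an active cache only needs positions for the newly fed tokens
        cache_position = cache_position[-input_length:]
    # with use_cache=False the positions are returned unsliced, which is the fix
    return cache_position

print(resolve_cache_position(0, 5))                                    # tensor([0, 1, 2, 3, 4])
print(resolve_cache_position(4, 1, torch.arange(5), use_cache=True))   # tensor([4])
print(resolve_cache_position(0, 5, torch.arange(5), use_cache=False))  # stays tensor([0, 1, 2, 3, 4])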
13 changes: 10 additions & 3 deletions src/transformers/models/gemma/modeling_gemma.py
@@ -1157,7 +1157,14 @@ def forward(
         )
 
     def prepare_inputs_for_generation(
-        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs
+        self,
+        input_ids,
+        past_key_values=None,
+        attention_mask=None,
+        inputs_embeds=None,
+        cache_position=None,
+        use_cache=True,
+        **kwargs,
     ):
         # With static cache, the `past_key_values` is None
         # TODO joao: standardize interface for the different Cache classes and remove of this if
@@ -1221,7 +1228,7 @@ def prepare_inputs_for_generation(
         input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
         if cache_position is None:
             cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
-        else:
+        elif use_cache:
             cache_position = cache_position[-input_length:]
 
         if has_static_cache:
@@ -1232,7 +1239,7 @@ def prepare_inputs_for_generation(
                 "position_ids": position_ids,
                 "cache_position": cache_position,
                 "past_key_values": past_key_values,
-                "use_cache": kwargs.get("use_cache"),
+                "use_cache": use_cache,
                 "attention_mask": attention_mask,
             }
         )
13 changes: 10 additions & 3 deletions src/transformers/models/llama/modeling_llama.py
@@ -1253,7 +1253,14 @@ def forward(
         )
 
     def prepare_inputs_for_generation(
-        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs
+        self,
+        input_ids,
+        past_key_values=None,
+        attention_mask=None,
+        inputs_embeds=None,
+        cache_position=None,
+        use_cache=True,
+        **kwargs,
     ):
         # With static cache, the `past_key_values` is None
         # TODO joao: standardize interface for the different Cache classes and remove of this if
@@ -1317,7 +1324,7 @@ def prepare_inputs_for_generation(
         input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
         if cache_position is None:
             cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
-        else:
+        elif use_cache:
             cache_position = cache_position[-input_length:]
 
         if has_static_cache:
@@ -1328,7 +1335,7 @@ def prepare_inputs_for_generation(
                 "position_ids": position_ids,
                 "cache_position": cache_position,
                 "past_key_values": past_key_values,
-                "use_cache": kwargs.get("use_cache"),
+                "use_cache": use_cache,
                 "attention_mask": attention_mask,
             }
         )
13 changes: 10 additions & 3 deletions src/transformers/models/olmo/modeling_olmo.py
@@ -1234,7 +1234,14 @@ def forward(
         )
 
     def prepare_inputs_for_generation(
-        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs
+        self,
+        input_ids,
+        past_key_values=None,
+        attention_mask=None,
+        inputs_embeds=None,
+        cache_position=None,
+        use_cache=True,
+        **kwargs,
     ):
         # With static cache, the `past_key_values` is None
         # TODO joao: standardize interface for the different Cache classes and remove of this if
@@ -1298,7 +1305,7 @@ def prepare_inputs_for_generation(
         input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
        if cache_position is None:
             cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
-        else:
+        elif use_cache:
             cache_position = cache_position[-input_length:]
 
         if has_static_cache:
@@ -1309,7 +1316,7 @@ def prepare_inputs_for_generation(
                 "position_ids": position_ids,
                 "cache_position": cache_position,
                 "past_key_values": past_key_values,
-                "use_cache": kwargs.get("use_cache"),
+                "use_cache": use_cache,
                 "attention_mask": attention_mask,
            }
         )
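
Each file also promotes use_cache from a kwargs lookup to an explicit parameter defaulting to True. A small illustration (toy functions, not from the diff) of why that matters:

# Toy comparison: kwargs.get("use_cache") is None when the caller omits the flag,
# whereas an explicit use_cache=True default gives the new elif use_cache: branch
# a well-defined value inside prepare_inputs_for_generation.
def old_signature(**kwargs):
    return kwargs.get("use_cache")  # None when not passed

def new_signature(use_cache=True, **kwargs):
    return use_cache                # True when not passed

print(old_signature())                 # None
print(new_signature())                 # True
print(new_signature(use_cache=False))  # False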
