Fix.
Narsil committed Dec 5, 2023
1 parent be481a4 commit cb8a168
Showing 2 changed files with 3 additions and 3 deletions.
server/text_generation_server/models/flash_causal_lm.py (2 changes: 1 addition & 1 deletion)
@@ -833,7 +833,7 @@ def generate_token(
             batch.top_n_tokens, batch.top_n_tokens_tensor, logprobs
         )
 
-        speculative_length = speculative_ids.shape[1]
+        speculative_length = 0 if speculative_ids is None else speculative_ids.shape[1]
         if prefill:
             if len(batch) > 1 and prefill_logprobs:
                 # We create the prefill_tokens_indices tensor that will be used to gather prefill logprobs
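In the hunk above, speculative_ids is None whenever speculative decoding is disabled, so reading .shape[1] unconditionally raised an AttributeError; the fix falls back to a length of 0 in that case. A minimal sketch of the guard, assuming only the shape implied by the diff (the helper name speculative_length_of is illustrative, not part of the codebase):

    import torch

    def speculative_length_of(speculative_ids):
        # speculative_ids is either None (speculation off) or a
        # [batch_size, n_speculative_tokens] tensor of draft token ids.
        return 0 if speculative_ids is None else speculative_ids.shape[1]

    assert speculative_length_of(None) == 0
    assert speculative_length_of(torch.zeros(2, 3, dtype=torch.long)) == 3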
server/text_generation_server/models/flash_mistral.py (4 changes: 2 additions & 2 deletions)
@@ -21,6 +21,7 @@
     FlashMistralForCausalLM,
     MistralConfig,
 )
+from text_generation_server.utils.speculate import get_speculate
 from text_generation_server.utils import (
     initialize_torch_distributed,
     weight_files,
@@ -132,8 +133,7 @@ def from_pb(

         # Paged attention
         # Remove one as the first token does not have a past
-        from text_generation_server.models import SPECULATE
-        speculative_length = SPECULATE
+        speculative_length = get_speculate()
         total_tokens = input_length + max_new_tokens - 1 + speculative_length
 
         # Needed blocks cannot go over SLIDING_WINDOW_BLOCKS
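The hunk above swaps a function-local import of the module-level SPECULATE constant for the get_speculate() accessor added to the imports, so from_pb reads the value configured at startup through one shared helper instead of re-importing the constant. A sketch of what such an accessor module plausibly looks like, stated as an assumption rather than a copy of text_generation_server/utils/speculate.py:

    # Hypothetical sketch: module-level state with setter/getter so
    # callers never bind the raw constant themselves.
    SPECULATE = 0  # assumed default; the real module may differ

    def set_speculate(speculate: int):
        global SPECULATE
        SPECULATE = speculate

    def get_speculate() -> int:
        return SPECULATE

With this in place, the block accounting in from_pb stays a single expression: total_tokens = input_length + max_new_tokens - 1 + get_speculate().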
