From 1d568dfab262f76079eb4f3d05b606d51a0c9e4b Mon Sep 17 00:00:00 2001
From: Arthur <48595927+ArthurZucker@users.noreply.github.com>
Date: Wed, 22 May 2024 18:06:50 +0200
Subject: [PATCH] legacy to init the slow tokenizer when converting from slow
 was wrong (#30972)

---
 src/transformers/models/llama/tokenization_llama_fast.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/src/transformers/models/llama/tokenization_llama_fast.py b/src/transformers/models/llama/tokenization_llama_fast.py
index 4a7ef126d41afe..580290841cfbef 100644
--- a/src/transformers/models/llama/tokenization_llama_fast.py
+++ b/src/transformers/models/llama/tokenization_llama_fast.py
@@ -151,9 +151,6 @@ def __init__(
         self.legacy = legacy
 
         if add_prefix_space is not None:
-            logger.warning_once(
-                "You set `add_prefix_space`. The tokenizer needs to be converted from the slow tokenizers"
-            )
             kwargs["from_slow"] = True
 
         super().__init__(
@@ -166,6 +163,7 @@ def __init__(
             add_bos_token=add_bos_token,
             add_eos_token=add_eos_token,
             use_default_system_prompt=use_default_system_prompt,
+            legacy=legacy,
             **kwargs,
         )
         self._add_bos_token = add_bos_token
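
A minimal usage sketch of what this patch changes (not part of the patch itself; the checkpoint name below is a hypothetical example, any Llama repo with a SentencePiece slow tokenizer would do). Setting add_prefix_space forces kwargs["from_slow"] = True, so the fast tokenizer is rebuilt from the slow tokenizer; with legacy=legacy now forwarded to the base class, that conversion sees the requested legacy setting instead of silently falling back to the default.

# Usage sketch, assuming transformers and sentencepiece are installed.
from transformers import LlamaTokenizerFast

# add_prefix_space triggers the from_slow conversion path touched by this
# patch; legacy=False is now propagated to the slow tokenizer used there.
tok = LlamaTokenizerFast.from_pretrained(
    "huggyllama/llama-7b",  # hypothetical checkpoint for illustration
    legacy=False,
    add_prefix_space=False,
)

print(tok.tokenize("Hello world"))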