From 6f841492bbbd6b2b2924cf934dfb5cfbc65886b8 Mon Sep 17 00:00:00 2001
From: Konstantin Dobler
Date: Tue, 14 Nov 2023 19:10:41 +0100
Subject: [PATCH] correctly handle bytefallback tokens

---
 src/deepfocus/vocab_helper.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/deepfocus/vocab_helper.py b/src/deepfocus/vocab_helper.py
index 1eaea19..5c2cc20 100644
--- a/src/deepfocus/vocab_helper.py
+++ b/src/deepfocus/vocab_helper.py
@@ -2,6 +2,7 @@ from dataclasses import dataclass
 
 import numpy as np
+import regex
 from torch import Tensor
 from tqdm import tqdm
 from transformers import PreTrainedTokenizer
 
@@ -59,6 +60,11 @@ def replace_space(tokenizer: PreTrainedTokenizer, token_id: int):
     """For XLM-R tokenizer (sentencepiece-style)"""
     decoded_token = tokenizer.decode(token_id)
     token = tokenizer.convert_ids_to_tokens(token_id)
+
+    # For sentencepiece ByteFallback tokens used in Llama, Mistral et al.
+    if regex.match(r"<0x[0-9A-F]{2}>", token):
+        return token, False
+
     is_beginning_of_word = token.startswith(XLMR_WHITESPACE)
     if is_beginning_of_word:
         return XLMR_WHITESPACE + decoded_token.lstrip(), True
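
For reference, below is a minimal, self-contained sketch of the check this patch introduces. The helper name is_bytefallback_token and the example tokens are illustrative, not part of the patch; the sketch assumes the third-party regex package is installed, matching the import added above.

import regex  # third-party "regex" package, mirroring the patch's import

BYTEFALLBACK_PATTERN = regex.compile(r"<0x[0-9A-F]{2}>")

def is_bytefallback_token(token: str) -> bool:
    """Return True for sentencepiece ByteFallback tokens such as "<0x0A>".

    Llama- and Mistral-style tokenizers represent raw bytes that have no
    dedicated vocabulary entry as "<0xHH>" pieces (uppercase hex), which
    should not be treated as word-initial tokens.
    """
    # fullmatch is slightly stricter than the patch's regex.match: it also
    # anchors the end of the string, so e.g. "<0x0A>extra" is rejected.
    return BYTEFALLBACK_PATTERN.fullmatch(token) is not None

assert is_bytefallback_token("<0x0A>")       # newline byte
assert not is_bytefallback_token("▁hello")   # ordinary sentencepiece piece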