Skip to content

Commit

Permalink
handle special tokens + add tests
Browse files Browse the repository at this point in the history
  • Loading branch information
younesbelkada committed Jun 20, 2024
1 parent 71105c0 commit b8aa283
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 7 deletions.
22 changes: 15 additions & 7 deletions src/transformers/integrations/ggml.py
Original file line number Diff line number Diff line change
Expand Up @@ -609,19 +609,12 @@ def tokenizer(self, proto):
self.additional_kwargs["bos_token"] = eos_token

if self.is_llama_3_tokenizer:
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(
add_prefix_space=False, trim_offsets=False, use_regex=True
)
self.additional_kwargs["add_prefix_space"] = False
self.additional_kwargs["clean_up_tokenization_spaces"] = True

self.additional_kwargs["legacy"] = False
self.original_tokenizer.legacy = False

# This is tricky as the additional kwargs are passed after legacy is force-set in LlamaTokenizer's
# init.
tokenizer.normalizer = normalizers.Sequence([])

return tokenizer

def decoder(self, replacement, add_prefix_space):
Expand All @@ -638,6 +631,21 @@ def decoder(self, replacement, add_prefix_space):
sequence += [decoders.Strip(content=" ", left=1)]
return decoders.Sequence(sequence)

def converted(self):
    """Build the fast tokenizer, then apply llama-3-specific patches.

    Delegates construction to the parent converter and, when this GGUF
    checkpoint is a llama-3 tokenizer, overrides the pre-tokenizer and
    clears the normalizer afterwards.
    """
    fast_tokenizer = super().converted()

    if not self.is_llama_3_tokenizer:
        return fast_tokenizer

    # HACK: patch the llama-3 tokenizer to use the corresponding
    # pre-tokenizer and normalizer.
    fast_tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(
        add_prefix_space=False, trim_offsets=False, use_regex=True
    )
    # This is tricky as the additional kwargs are passed after legacy is
    # force-set in LlamaTokenizer's init.
    fast_tokenizer.normalizer = normalizers.Sequence([])

    return fast_tokenizer


class GGUFQwen2Converter(Qwen2Converter):
def __init__(self, tokenizer_dict):
Expand Down
6 changes: 6 additions & 0 deletions tests/quantization/ggml/test_ggml.py
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,12 @@ def test_qwen2_q4_0(self):
EXPECTED_TEXT = "Hello.jsoup\n\nI am a beginner"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)

def test_llama3_q4_0_tokenizer(self):
    """Non-ASCII text must round-trip through the GGUF llama-3 tokenizer,
    with the BOS special token prepended on decode."""
    gguf_tokenizer = AutoTokenizer.from_pretrained(self.llama3_model_id, gguf_file=self.q4_llama3_model_id)
    special_sentence = "สวัสดี"
    encoded_ids = gguf_tokenizer.encode(special_sentence, return_tensors="pt")[0]
    predicted_text = gguf_tokenizer.decode(encoded_ids)
    self.assertEqual(predicted_text, "<|begin_of_text|>" + special_sentence)

def test_llama3_q4_0(self):
tokenizer = AutoTokenizer.from_pretrained(self.llama3_model_id, gguf_file=self.q4_llama3_model_id)
model = AutoModelForCausalLM.from_pretrained(
Expand Down

0 comments on commit b8aa283

Please sign in to comment.