From 9177ad47824f0b511fca33f8b032b868571e603f Mon Sep 17 00:00:00 2001
From: Molly Sophia
Date: Tue, 22 Oct 2024 21:22:26 +0800
Subject: [PATCH] Rwkv chat template fix (#10001)

* llama: remove useless template matching for rwkv-world

Signed-off-by: Molly Sophia

* converter: Add comment about the hack for rwkv models

Signed-off-by: Molly Sophia

* Update src/llama.cpp

Co-authored-by: Xuan Son Nguyen

---------

Signed-off-by: Molly Sophia
Co-authored-by: Xuan Son Nguyen
---
 convert_hf_to_gguf.py        | 1 +
 src/llama.cpp                | 3 ++-
 tests/test-chat-template.cpp | 4 ----
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index e0b1b2bf99d6b..7e552a71b5c7c 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -2865,6 +2865,7 @@ def set_vocab(self):
         self.gguf_writer.add_token_types(toktypes)
         special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False)
         special_vocab.chat_template = "rwkv-world"
+        # hack: Add '\n\n' as the EOT token to make it chat normally
         special_vocab._set_special_token("eot", 261)
         special_vocab.add_to_gguf(self.gguf_writer)

diff --git a/src/llama.cpp b/src/llama.cpp
index cee758d9799ba..e2e5eb9831d84 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -21703,7 +21703,8 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "[|assistant|]";
         }
-    } else if (tmpl == "rwkv-world" || tmpl_contains("rwkv-world") || tmpl_contains("'User: ' + message['content'] + '\n\nAssistant:'")) {
+    } else if (tmpl == "rwkv-world" || tmpl_contains("rwkv-world")) {
+        // this template requires the model to have "\n\n" as EOT token
         for (auto message : chat) {
             std::string role(message->role);
             if (role == "user") {
diff --git a/tests/test-chat-template.cpp b/tests/test-chat-template.cpp
index fdc4a9bc3fd2f..6f046249fa1a8 100644
--- a/tests/test-chat-template.cpp
+++ b/tests/test-chat-template.cpp
@@ -65,8 +65,6 @@ int main(void) {
         u8"{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}",
         // DeepSeek-V2
         "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
-        // RWKV-World
-        "{% for message in messages %}{% if message['role'] == 'user' %}{{'User: ' + message['content'] + '\n\nAssistant:'}}{% else %}{{message['content'] + '\n\n'}}{% endif %}{% endfor %}",
     };
     std::vector<std::string> expected_output = {
         // teknium/OpenHermes-2.5-Mistral-7B
@@ -111,8 +109,6 @@ int main(void) {
         u8"You are a helpful assistant<用户>Hello<AI>Hi there<用户>Who are you<AI>I am an assistant<用户>Another question<AI>",
         // DeepSeek-V2
         u8"You are a helpful assistant\n\nUser: Hello\n\nAssistant: Hi there<|end▁of▁sentence|>User: Who are you\n\nAssistant: I am an assistant <|end▁of▁sentence|>User: Another question\n\nAssistant:",
-        // RWKV-World
-        "You are a helpful assistant\n\nUser: Hello\n\nAssistant:Hi there\n\nUser: Who are you\n\nAssistant: I am an assistant \n\nUser: Another question\n\nAssistant:",
     };
     std::vector<char> formatted_chat(1024);
     int32_t res;
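
For reference (not part of the patch): a minimal, self-contained C++ sketch of the rwkv-world formatting that this change keeps, reconstructed from the diff context and the test expectation removed above. The chat_msg struct and format_rwkv_world helper are hypothetical names for illustration, not llama.cpp API.

// Hypothetical standalone sketch of the rwkv-world branch kept by this patch.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct chat_msg { std::string role; std::string content; };

static std::string format_rwkv_world(const std::vector<chat_msg> & chat) {
    std::ostringstream ss;
    for (const auto & msg : chat) {
        if (msg.role == "user") {
            // a user turn primes the model to answer; generation then stops
            // at the "\n\n" EOT token the converter hack registers
            ss << "User: " << msg.content << "\n\nAssistant:";
        } else {
            // system and assistant turns are emitted verbatim plus "\n\n"
            ss << msg.content << "\n\n";
        }
    }
    return ss.str();
}

int main() {
    std::vector<chat_msg> chat = {
        {"system",    "You are a helpful assistant"},
        {"user",      "Hello"},
        {"assistant", "Hi there"},
        {"user",      "Who are you"},
    };
    // prints: You are a helpful assistant\n\nUser: Hello\n\nAssistant:Hi there\n\nUser: Who are you\n\nAssistant:
    std::cout << format_rwkv_world(chat);
}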