tool-call: promote getting chat templates w/ dedicated script rather than rely on test resources
ochafik committed Oct 2, 2024
1 parent f3538e7 commit 9e502e8
Showing 3 changed files with 76 additions and 7 deletions.
12 changes: 6 additions & 6 deletions examples/agent/README.md
@@ -10,7 +10,7 @@
 # Nous Hermes 2 Pro Llama 3 8B
 ./llama-server --jinja -fa --verbose \
   -hfr NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF -hff Hermes-2-Pro-Llama-3-8B-Q8_0.gguf \
-  --chat-template-file tests/chat/templates/NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja
+  --chat-template "$( python scripts/get_hf_chat_template.py NousResearch/Hermes-2-Pro-Llama-3-8B tool_use )"

 # Llama 3.1 8B
 ./llama-server --jinja -fa --verbose \
@@ -23,25 +23,25 @@
 # functionary-small-v3
 ./llama-server --jinja -fa --verbose \
   -hfr meetkai/functionary-small-v3.2-GGUF -hff functionary-small-v3.2.Q4_0.gguf \
-  --chat-template-file tests/chat/templates/meetkai-functionary-medium-v3.2.jinja
+  --chat-template "$( python scripts/get_hf_chat_template.py meetkai/functionary-medium-v3.2 )"

 ./llama-server --jinja -fa --verbose \
   -m ~/Downloads/functionary-small-v3.2.Q4_0.gguf \
-  --chat-template-file tests/chat/templates/meetkai-functionary-medium-v3.2.jinja
+  --chat-template "$( python scripts/get_hf_chat_template.py meetkai/functionary-medium-v3.2 )"

 # Llama 3.2 3B (poor adherence)
 ./llama-server --jinja -fa --verbose \
   -hfr lmstudio-community/Llama-3.2-3B-Instruct-GGUF -hff Llama-3.2-3B-Instruct-Q6_K_L.gguf \
-  --chat-template-file tests/chat/templates/meta-llama-Llama-3.2-3B-Instruct.jinja
+  --chat-template "$( python scripts/get_hf_chat_template.py meta-llama/Llama-3.2-3B-Instruct )"

 ./llama-server --jinja -fa --verbose \
   -m ~/Downloads/Llama-3.2-3B-Instruct-Q6_K_L.gguf \
-  --chat-template-file tests/chat/templates/meta-llama-Llama-3.2-3B-Instruct.jinja
+  --chat-template "$( python scripts/get_hf_chat_template.py meta-llama/Llama-3.2-3B-Instruct )"

 # Llama 3.2 1B (very poor adherence)
 ./llama-server --jinja -fa --verbose \
   -hfr lmstudio-community/Llama-3.2-1B-Instruct-GGUF -hff Llama-3.2-1B-Instruct-Q4_K_M.gguf \
-  --chat-template-file tests/chat/templates/meta-llama-Llama-3.2-3B-Instruct.jinja
+  --chat-template "$( python scripts/get_hf_chat_template.py meta-llama/Llama-3.2-3B-Instruct )"
 ```

 - Run the tools in [examples/agent/tools](./examples/agent/tools) inside a docker container (check http://localhost:8088/docs once running):
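
Since the new invocation pattern splices the script's stdout directly into `--chat-template` via shell command substitution, it can be worth previewing what the script returns before launching the server. A minimal sanity-check sketch (illustrative only, not part of this commit; assumes the script is on disk and the model is accessible):

```python
# Fetch a chat template with the new script and preview its first lines.
import subprocess

template = subprocess.run(
    ["python", "scripts/get_hf_chat_template.py", "meta-llama/Llama-3.2-3B-Instruct"],
    capture_output=True, text=True, check=True,
).stdout
print(template[:300])  # the start of the Jinja template
```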
69 changes: 69 additions & 0 deletions scripts/get_hf_chat_template.py
@@ -0,0 +1,69 @@
'''
  Fetches the Jinja chat template of a HuggingFace model.
  If a model has multiple chat template variants, specify which one to fetch.

  Syntax:
    get_hf_chat_template.py model_id [variant]

  Examples:
    python ./scripts/get_hf_chat_template.py NousResearch/Meta-Llama-3-8B-Instruct
    python ./scripts/get_hf_chat_template.py NousResearch/Hermes-3-Llama-3.1-70B tool_use
    python ./scripts/get_hf_chat_template.py meta-llama/Llama-3.2-3B-Instruct
'''

import json
import re
import sys


def main(args):
    if len(args) < 1:
        raise ValueError("Please provide a model ID and an optional variant name")
    model_id = args[0]
    variant = None if len(args) < 2 else args[1]

    try:
        # Use huggingface_hub library if available.
        # Allows access to gated models if the user has access and ran `huggingface-cli login`.
        from huggingface_hub import hf_hub_download
        with open(hf_hub_download(repo_id=model_id, filename="tokenizer_config.json")) as f:
            config_str = f.read()
    except ImportError:
        import requests
        assert re.match(r"^[\w.-]+/[\w.-]+$", model_id), f"Invalid model ID: {model_id}"
        response = requests.get(f"https://huggingface.co/{model_id}/resolve/main/tokenizer_config.json")
        if response.status_code == 401:
            raise Exception('Access to this model is gated, please request access, authenticate with `huggingface-cli login` and make sure to run `pip install huggingface_hub`')
        response.raise_for_status()
        config_str = response.text

    try:
        config = json.loads(config_str)
    except json.JSONDecodeError:
        # Fix https://huggingface.co/NousResearch/Meta-Llama-3-8B-Instruct/blob/main/tokenizer_config.json
        # (Remove extra '}' near the end of the file)
        config = json.loads(re.sub(r'\}([\n\s]*\}[\n\s]*\],[\n\s]*"clean_up_tokenization_spaces")', r'\1', config_str))

    chat_template = config['chat_template']
    if isinstance(chat_template, str):
        print(chat_template, end=None)
    else:
        variants = {
            ct['name']: ct['template']
            for ct in chat_template
        }
        format_variants = lambda: ', '.join(f'"{v}"' for v in variants.keys())

        if variant is None:
            if 'default' not in variants:
                raise Exception(f'Please specify a chat template variant (one of {format_variants()})')
            variant = 'default'
            print(f'Note: picked "default" chat template variant (out of {format_variants()})', file=sys.stderr)
        elif variant not in variants:
            raise Exception(f"Variant {variant} not found in chat template (found {format_variants()})")

        print(variants[variant], end=None)


if __name__ == '__main__':
    main(sys.argv[1:])
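
For context on the branch above: in `tokenizer_config.json`, the `chat_template` entry is either a single Jinja string or a list of named variants, which is what the `variants` dict comprehension unpacks. A minimal sketch of the two shapes the script handles (template bodies are placeholders):

```python
# Shape taken by the isinstance(chat_template, str) branch:
single = {"chat_template": "{% for message in messages %}...{% endfor %}"}

# Shape taken by the variants branch (e.g. models shipping a "tool_use" variant):
multi = {
    "chat_template": [
        {"name": "default", "template": "{# default Jinja template #}"},
        {"name": "tool_use", "template": "{# tool-calling Jinja template #}"},
    ]
}
```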
2 changes: 1 addition & 1 deletion tests/update_jinja_goldens.py → scripts/update_jinja_goldens.py
@@ -10,7 +10,7 @@
 Fetches the Jinja2 templates of a few known models and uses them to generate prompt goldens for a few predefined chat contexts.
 Examples:
-    python ./tests/update_jinja_goldens.py
+    python ./scripts/update_jinja_goldens.py
 https://github.com/huggingface/transformers/blob/main/src/transformers/utils/chat_template_utils.py
 '''