Support chat response format (#2046)
* feat: support response_format in chat
* fix: adjust typos
* fix: add trufflehog lint
Showing 5 changed files with 156 additions and 7 deletions.
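For context, the new `response_format` field on the `/v1/chat/completions` route takes a `json_object` type plus a JSON Schema under `value`. Here is a minimal client-side sketch mirroring the integration test added below; the local URL and the inline schema are illustrative placeholders, not part of the commit:

```python
import requests

# Minimal sketch of the new parameter; http://localhost:3000 is a
# placeholder for a running text-generation-inference server.
response = requests.post(
    "http://localhost:3000/v1/chat/completions",
    json={
        "model": "tgi",
        "messages": [
            {
                "role": "user",
                "content": "What's the weather like the next 3 days in San Francisco, CA?",
            },
        ],
        "max_tokens": 500,
        # New in this commit: constrain the output to a JSON Schema.
        "response_format": {
            "type": "json_object",
            "value": {
                "type": "object",
                "properties": {
                    "unit": {"type": "string"},
                    "temperature": {"type": "array", "items": {"type": "integer"}},
                },
                "required": ["unit", "temperature"],
            },
        },
    },
)
print(response.json()["choices"][0]["message"]["content"])
```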
Secret Scanning workflow (trufflehog lint):

```diff
@@ -16,4 +16,3 @@ jobs:
           fetch-depth: 0
       - name: Secret Scanning
         uses: trufflesecurity/trufflehog@main
```
...apshots__/test_grammar_response_format_llama/test_grammar_response_format_llama_json.json (new file, 23 additions):
```json
{
  "choices": [
    {
      "finish_reason": "eos_token",
      "index": 0,
      "logprobs": null,
      "message": {
        "content": "{\n \"temperature\": [\n 35,\n 34,\n 36\n ],\n \"unit\": \"°c\"\n}",
        "role": "assistant"
      }
    }
  ],
  "created": 1718044128,
  "id": "",
  "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
  "object": "text_completion",
  "system_fingerprint": "2.0.5-dev0-native",
  "usage": {
    "completion_tokens": 39,
    "prompt_tokens": 136,
    "total_tokens": 175
  }
}
```
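The `message.content` in this snapshot is itself a JSON document that conforms to the schema the test sends, so it should round-trip through the same pydantic model. A quick sanity check, illustrative only and not part of the commit:

```python
import json
from typing import List
from pydantic import BaseModel


class Weather(BaseModel):
    unit: str
    temperature: List[int]


# The snapshot's message.content, copied from the file above.
content = '{\n "temperature": [\n 35,\n 34,\n 36\n ],\n "unit": "°c"\n}'

# Grammar-constrained output should parse cleanly into the model.
weather = Weather(**json.loads(content))
assert weather.unit == "°c"
assert weather.temperature == [35, 34, 36]
```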
integration-tests/models/test_grammar_response_format_llama.py (new file, 101 additions):
```python
import pytest
import requests
from pydantic import BaseModel
from typing import List


@pytest.fixture(scope="module")
def llama_grammar_handle(launcher):
    with launcher(
        "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        num_shard=1,
        disable_grammar_support=False,
        use_flash_attention=False,
        max_batch_prefill_tokens=3000,
    ) as handle:
        yield handle


@pytest.fixture(scope="module")
async def llama_grammar(llama_grammar_handle):
    await llama_grammar_handle.health(300)
    return llama_grammar_handle.client


@pytest.mark.asyncio
async def test_grammar_response_format_llama_json(llama_grammar, response_snapshot):
    class Weather(BaseModel):
        unit: str
        temperature: List[int]

    # send the request
    response = requests.post(
        f"{llama_grammar.base_url}/v1/chat/completions",
        headers=llama_grammar.headers,
        json={
            "model": "tgi",
            "messages": [
                {
                    "role": "system",
                    "content": f"Respond to the users questions and answer them in the following format: {Weather.schema()}",
                },
                {
                    "role": "user",
                    "content": "What's the weather like the next 3 days in San Francisco, CA?",
                },
            ],
            "seed": 42,
            "max_tokens": 500,
            "response_format": {"type": "json_object", "value": Weather.schema()},
        },
    )

    chat_completion = response.json()
    called = chat_completion["choices"][0]["message"]["content"]

    assert response.status_code == 200
    assert (
        called
        == '{\n "temperature": [\n 35,\n 34,\n 36\n ],\n "unit": "°c"\n}'
    )
    assert chat_completion == response_snapshot


@pytest.mark.asyncio
async def test_grammar_response_format_llama_error_if_tools_not_installed(
    llama_grammar,
):
    class Weather(BaseModel):
        unit: str
        temperature: List[int]

    # send the request
    response = requests.post(
        f"{llama_grammar.base_url}/v1/chat/completions",
        headers=llama_grammar.headers,
        json={
            "model": "tgi",
            "messages": [
                {
                    "role": "system",
                    "content": f"Respond to the users questions and answer them in the following format: {Weather.schema()}",
                },
                {
                    "role": "user",
                    "content": "What's the weather like the next 3 days in San Francisco, CA?",
                },
            ],
            "seed": 42,
            "max_tokens": 500,
            "tools": [],
            "response_format": {"type": "json_object", "value": Weather.schema()},
        },
    )

    # 422 means the server was unable to process the request because it contains invalid data.
    assert response.status_code == 422
    assert response.json() == {
        "error": "Grammar and tools are mutually exclusive",
        "error_type": "grammar and tools",
    }
```
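Both tests pass `Weather.schema()` as the grammar value. With pydantic v1, which the `.schema()` call implies, that returns a JSON Schema dict along these lines; the output sketched in the comment below is an assumption and may vary by pydantic version:

```python
from typing import List
from pydantic import BaseModel


class Weather(BaseModel):
    unit: str
    temperature: List[int]


# Approximate pydantic v1 output of Weather.schema():
# {
#     "title": "Weather",
#     "type": "object",
#     "properties": {
#         "unit": {"title": "Unit", "type": "string"},
#         "temperature": {"title": "Temperature", "type": "array",
#                         "items": {"type": "integer"}},
#     },
#     "required": ["unit", "temperature"],
# }
print(Weather.schema())
```

The second test also exercises the guardrail added with the feature: sending `tools` together with `response_format` is rejected with a 422, since grammar constraints and tool choice are mutually exclusive.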