Skip to content

Commit

Permalink
Return num_tokens_prompt_total for evaluation
Browse files Browse the repository at this point in the history
  • Loading branch information
Jakob Herpel committed Jan 15, 2024
1 parent 94aae05 commit 2e35793
Show file tree
Hide file tree
Showing 5 changed files with 69 additions and 6 deletions.
5 changes: 5 additions & 0 deletions Changelog.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,10 @@
# Changelog

## 7.0.0

- Added `num_tokens_prompt_total` to `EvaluationResponse`
- HTTP API version 1.16.0 or higher is required.

## 6.0.0

- Added `num_tokens_prompt_total` to the types below.
Expand Down
4 changes: 3 additions & 1 deletion aleph_alpha_client/evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,13 +51,15 @@ def _asdict(self) -> Mapping[str, Any]:
@dataclass(frozen=True)
class EvaluationResponse:
    """Response of an evaluate request against the Aleph Alpha HTTP API.

    Attributes:
        model_version: Name/version of the model that served the request.
        result: Raw evaluation metrics as returned by the API.
        num_tokens_prompt_total: Total number of tokens in the prompt
            (requires HTTP API >= 1.16.0 per the changelog above).
        message: Optional informational message from the API; absent in
            the payload when there is nothing to report.
    """

    model_version: str
    result: Dict[str, Any]
    num_tokens_prompt_total: int
    # Declared exactly once, last, matching the keyword order used in
    # from_json below (the diff rendering duplicated this field).
    message: Optional[str]

    @staticmethod
    def from_json(json: Dict[str, Any]) -> "EvaluationResponse":
        """Build an EvaluationResponse from the API's decoded JSON payload.

        Raises KeyError if a required key ("model_version", "result",
        "num_tokens_prompt_total") is missing; "message" is optional and
        defaults to None via dict.get.
        """
        return EvaluationResponse(
            model_version=json["model_version"],
            result=json["result"],
            num_tokens_prompt_total=json["num_tokens_prompt_total"],
            message=json.get("message"),
        )
4 changes: 2 additions & 2 deletions aleph_alpha_client/version.py
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
__version__ = "6.0.0"
MIN_API_VERSION = "1.15.0"
__version__ = "7.0.0"
MIN_API_VERSION = "1.16.0"
54 changes: 54 additions & 0 deletions tests/demo_num_tokens_prompt_total.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"5\n"
]
}
],
"source": [
"import os\n",
"from aleph_alpha_client import AsyncClient, EmbeddingRequest, Prompt\n",
"\n",
"\n",
    "TOKEN = os.environ[\"AA_TOKEN\"]\n",
"\n",
"# Can enter context manager within an async function\n",
"async with AsyncClient(token=TOKEN) as client:\n",
" request = EmbeddingRequest(\n",
" prompt=Prompt.from_text(\"hello this is a prompt\"), layers=[-1], pooling=[\"mean\"], tokens=True\n",
" )\n",
" response = await client.embed(request, model=\"luminous-base\")\n",
"print(response.num_tokens_prompt_total)\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
8 changes: 5 additions & 3 deletions tests/test_evaluate.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ async def test_can_evaluate_with_async_client(
response = await async_client.evaluate(request, model=model_name)
assert response.model_version is not None
assert response.result is not None
assert response.num_tokens_prompt_total >= 1


# Client
def test_evaluate(sync_client: Client, model_name: str):
    """Synchronous-client evaluation returns model_version, result and token count.

    Mirrors the async test above; `num_tokens_prompt_total` must be at
    least 1 for a non-empty prompt.
    """
    # NOTE(review): the constructor line was folded out of the diff view;
    # reconstructed as EvaluationRequest — confirm against the full file.
    request = EvaluationRequest(
        prompt=Prompt.from_text("hello"), completion_expected="world"
    )

    # Unified on a single `response` name — the rendered diff mixed the
    # old `result` bindings with the new `response` assertions, leaving
    # `result` undefined on the new side.
    response = sync_client.evaluate(request, model=model_name)

    assert response.model_version is not None
    assert response.result is not None
    assert response.num_tokens_prompt_total >= 1

0 comments on commit 2e35793

Please sign in to comment.