Add dev instructions to readme #154

Merged · 2 commits · Dec 5, 2023
23 changes: 22 additions & 1 deletion README.md
@@ -40,7 +40,7 @@ async with AsyncClient(token=os.environ["AA_TOKEN"]) as client:
        prompt=Prompt.from_text("Provide a short description of AI:"),
        maximum_tokens=64,
    )
-        response = await client.complete(request, model="luminous-base")
+    response = await client.complete(request, model="luminous-base")
    print(response.completions[0].completion)
```

@@ -66,6 +66,27 @@ pip install aleph-alpha-client

Get started using the client by first [creating an account](https://app.aleph-alpha.com/signup). Afterwards head over to [your profile](https://app.aleph-alpha.com/profile) to create an API token. Read more about how you can manage your API tokens [here](https://docs.aleph-alpha.com/docs/account).
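
Once you have a token, a minimal synchronous sketch to verify that everything is wired up (this assumes the token is exported as `AA_TOKEN` and mirrors the async example above):

```
import os

from aleph_alpha_client import Client, CompletionRequest, Prompt

client = Client(token=os.environ["AA_TOKEN"])
request = CompletionRequest(
    prompt=Prompt.from_text("Provide a short description of AI:"),
    maximum_tokens=64,
)
# Same completion call as the async example, without the event loop.
response = client.complete(request, model="luminous-base")
print(response.completions[0].completion)
```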

## Development

For local development, start by creating a Python virtual environment as follows:

```
python3 -m venv venv
. ./venv/bin/activate
```

Next, install the `test` and `dev` dependencies:

```
pip install -e ".[test,dev]"
```

Now you should be able to:

* run all the tests using `pytest`, or `pytest -k <test_name>` to run a specific test
* type-check the code and tests using `mypy aleph_alpha_client` and `mypy tests`, respectively
* format the code using `black .`

## Links

- [HTTP API Docs](https://docs.aleph-alpha.com/api/)
6 changes: 3 additions & 3 deletions aleph_alpha_client/aleph_alpha_client.py
@@ -149,7 +149,7 @@ def __init__(
        request_timeout_seconds: int = DEFAULT_REQUEST_TIMEOUT,
        total_retries: int = 8,
        nice: bool = False,
-        verify_ssl = True,
+        verify_ssl=True,
    ) -> None:
        if host[-1] != "/":
            host += "/"
@@ -625,7 +625,7 @@ def __init__(
        request_timeout_seconds: int = DEFAULT_REQUEST_TIMEOUT,
        total_retries: int = 8,
        nice: bool = False,
-        verify_ssl = True,
+        verify_ssl=True,
    ) -> None:
        if host[-1] != "/":
            host += "/"
@@ -652,7 +652,7 @@ def __init__(
"User-Agent": "Aleph-Alpha-Python-Client-"
+ aleph_alpha_client.__version__,
},
connector=connector
connector=connector,
)

async def close(self):
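
The `verify_ssl` keyword being normalized here defaults to `True`; passing `False` presumably disables TLS certificate verification, e.g. for a self-hosted endpoint with a self-signed certificate. A minimal sketch of a caller using it (the `host` URL is hypothetical):

```
import os

from aleph_alpha_client import Client

# Hypothetical self-hosted endpoint with a self-signed certificate.
# verify_ssl=False skips certificate verification; use with care.
client = Client(
    token=os.environ["AA_TOKEN"],
    host="https://localhost:8443/",
    verify_ssl=False,
)
```
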
7 changes: 6 additions & 1 deletion tests/test_clients.py
@@ -33,7 +33,12 @@ def test_nice_flag_on_client(httpserver: HTTPServer):
    ).respond_with_json(
        CompletionResponse(
            "model_version",
-            [CompletionResult(log_probs=[], completion="foo", )],
+            [
+                CompletionResult(
+                    log_probs=[],
+                    completion="foo",
+                )
+            ],
            num_tokens_prompt_total=2,
            num_tokens_generated=1,
        ).to_json()
22 changes: 12 additions & 10 deletions tests/test_complete.py
@@ -128,33 +128,35 @@ def test_complete_with_echo(sync_client: Client, model_name: str, prompt_image:
    assert completion_result.log_probs is not None
    assert len(completion_result.log_probs) > 0


@pytest.mark.system_test
def test_num_tokens_prompt_total_with_best_of(sync_client: Client, model_name: str):
-    tokens = [49222, 2998] # Hello world
+    tokens = [49222, 2998]  # Hello world
    best_of = 2
    request = CompletionRequest(
-        prompt = Prompt.from_tokens(tokens),
-        best_of = best_of,
-        maximum_tokens = 1,
+        prompt=Prompt.from_tokens(tokens),
+        best_of=best_of,
+        maximum_tokens=1,
    )

    response = sync_client.complete(request, model=model_name)
    assert response.num_tokens_prompt_total == len(tokens) * best_of


@pytest.mark.system_test
def test_num_tokens_generated_with_best_of(sync_client: Client, model_name: str):
-    hello_world = [49222, 2998] # Hello world
+    hello_world = [49222, 2998]  # Hello world
    best_of = 2
    request = CompletionRequest(
-        prompt = Prompt.from_tokens(hello_world),
-        best_of = best_of,
-        maximum_tokens = 1,
-        tokens = True,
+        prompt=Prompt.from_tokens(hello_world),
+        best_of=best_of,
+        maximum_tokens=1,
+        tokens=True,
    )

    response = sync_client.complete(request, model=model_name)
    completion_result = response.completions[0]
    assert completion_result.completion_tokens is not None
    number_tokens_completion = len(completion_result.completion_tokens)

-    assert response.num_tokens_generated == best_of * number_tokens_completion
\ No newline at end of file
+    assert response.num_tokens_generated == best_of * number_tokens_completion
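
Taken together, these two tests pin down how `best_of` scales token accounting: each candidate consumes the full prompt and produces its own completion, so both counters grow linearly with `best_of`. A small illustration of that bookkeeping (plain Python, assuming each candidate emits the single token allowed by `maximum_tokens=1`):

```
tokens = [49222, 2998]  # "Hello world" as token ids, as in the tests above
best_of = 2
maximum_tokens = 1

# The prompt is processed once per candidate ...
num_tokens_prompt_total = len(tokens) * best_of  # 2 * 2 == 4
# ... and each candidate generates its own completion.
num_tokens_generated = best_of * maximum_tokens  # 2 * 1 == 2
```
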
8 changes: 7 additions & 1 deletion tests/test_error_handling.py
@@ -111,7 +111,13 @@ def expect_retryable_error(

def expect_valid_completion(httpserver: HTTPServer) -> None:
    httpserver.expect_ordered_request("/complete").respond_with_json(
-        {"model_version": "1", "completions": [], "num_tokens_prompt_total": 0, "num_tokens_generated": 0})
+        {
+            "model_version": "1",
+            "completions": [],
+            "num_tokens_prompt_total": 0,
+            "num_tokens_generated": 0,
+        }
+    )


def expect_valid_version(httpserver: HTTPServer) -> None:
1 change: 0 additions & 1 deletion tests/test_evaluate.py
@@ -30,7 +30,6 @@ async def test_can_evaluate_with_async_client(

@pytest.mark.system_test
def test_evaluate(sync_client: Client, model_name: str):
-
    request = EvaluationRequest(
        prompt=Prompt.from_text("hello"), completion_expected="world"
    )
5 changes: 3 additions & 2 deletions tests/test_prompt_template.py
@@ -164,11 +164,12 @@ def test_to_prompt_works_with_tokens():

    assert prompt == user_prompt


def test_to_prompt_resets_cache(prompt_image: Image):
    user_prompt = Prompt([prompt_image, Text.from_text("Cool"), prompt_image])

    template = PromptTemplate("{{user_prompt}}")

    template.to_prompt(user_prompt=template.embed_prompt(user_prompt))

-    assert template.non_text_items == {}
\ No newline at end of file
+    assert template.non_text_items == {}
3 changes: 2 additions & 1 deletion tests/test_wildcard_import.py
@@ -1,4 +1,5 @@
from aleph_alpha_client import *


def test_should_be_able_to_import_with_wildcard():
-    pass  # Wildcard import can not go into the test itself
\ No newline at end of file
+    pass  # Wildcard import can not go into the test itself