Skip to content

Commit

Permalink
docs: do not specify max tokens in examples
Browse files Browse the repository at this point in the history
Update the examples where specifying max tokens served no purpose; leave untouched the examples that demonstrate how to specify it.
  • Loading branch information
moldhouse committed Aug 9, 2024
1 parent 56e721e commit 7e67fe4
Show file tree
Hide file tree
Showing 4 changed files with 5 additions and 12 deletions.
2 changes: 0 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ from aleph_alpha_client import Client, CompletionRequest, Prompt
client = Client(token=os.getenv("AA_TOKEN"))
request = CompletionRequest(
prompt=Prompt.from_text("Provide a short description of AI:"),
maximum_tokens=64,
)
response = client.complete(request, model="luminous-extended")

Expand All @@ -38,7 +37,6 @@ from aleph_alpha_client import AsyncClient, CompletionRequest, Prompt
async with AsyncClient(token=os.environ["AA_TOKEN"]) as client:
request = CompletionRequest(
prompt=Prompt.from_text("Provide a short description of AI:"),
maximum_tokens=64,
)
response = await client.complete(request, model="luminous-base")
print(response.completions[0].completion)
Expand Down
4 changes: 1 addition & 3 deletions aleph_alpha_client/aleph_alpha_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,9 +146,7 @@ class Client:
Internal feature.
Example usage:
>>> request = CompletionRequest(
prompt=Prompt.from_text(f"Request"), maximum_tokens=64
)
>>> request = CompletionRequest(prompt=Prompt.from_text(f"Request"))
>>> client = Client(token=os.environ["AA_TOKEN"])
>>> response: CompletionResponse = client.complete(request, "luminous-base")
"""
Expand Down
2 changes: 1 addition & 1 deletion aleph_alpha_client/completion.py
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,7 @@ class CompletionRequest:
Examples:
>>> prompt = Prompt.from_text("Provide a short description of AI:")
>>> request = CompletionRequest(prompt=prompt, maximum_tokens=20)
>>> request = CompletionRequest(prompt=prompt)
"""

prompt: Prompt
Expand Down
9 changes: 3 additions & 6 deletions docs/source/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ Synchronous client.
client = Client(token=os.getenv("AA_TOKEN"))
prompt = Prompt.from_text("Provide a short description of AI:")
request = CompletionRequest(prompt=prompt, maximum_tokens=20)
request = CompletionRequest(prompt=prompt)
result = client.complete(request, model="luminous-extended")
print(result.completions[0].completion)
Expand All @@ -36,7 +36,7 @@ Synchronous client with prompt containing an image.
image = Image.from_file("path-to-an-image")
prompt_template = PromptTemplate("{{image}}This picture shows ")
prompt = prompt_template.to_prompt(image=prompt_template.placeholder(image))
request = CompletionRequest(prompt=prompt, maximum_tokens=20)
request = CompletionRequest(prompt=prompt)
result = client.complete(request, model="luminous-extended")
print(result.completions[0].completion)
Expand All @@ -51,10 +51,7 @@ Asynchronous client.
# Can enter context manager within an async function
async with AsyncClient(token=os.environ["AA_TOKEN"]) as client:
request = CompletionRequest(
prompt=Prompt.from_text("Request"),
maximum_tokens=64,
)
request = CompletionRequest(prompt=Prompt.from_text("Request"))
response = await client.complete(request, model="luminous-base")
.. toctree::
Expand Down

0 comments on commit 7e67fe4

Please sign in to comment.