diff --git a/README.md b/README.md
index 45df4ad..1ffc170 100644
--- a/README.md
+++ b/README.md
@@ -21,7 +21,6 @@ from aleph_alpha_client import Client, CompletionRequest, Prompt
 
 client = Client(token=os.getenv("AA_TOKEN"))
 request = CompletionRequest(
     prompt=Prompt.from_text("Provide a short description of AI:"),
-    maximum_tokens=64,
 )
 response = client.complete(request, model="luminous-extended")
@@ -38,7 +37,6 @@ from aleph_alpha_client import AsyncClient, CompletionRequest, Prompt
 async with AsyncClient(token=os.environ["AA_TOKEN"]) as client:
     request = CompletionRequest(
         prompt=Prompt.from_text("Provide a short description of AI:"),
-        maximum_tokens=64,
     )
     response = await client.complete(request, model="luminous-base")
     print(response.completions[0].completion)
diff --git a/aleph_alpha_client/aleph_alpha_client.py b/aleph_alpha_client/aleph_alpha_client.py
index a039e9b..4a2a118 100644
--- a/aleph_alpha_client/aleph_alpha_client.py
+++ b/aleph_alpha_client/aleph_alpha_client.py
@@ -146,9 +146,7 @@ class Client:
         Internal feature.
 
         Example usage:
-        >>> request = CompletionRequest(
-                prompt=Prompt.from_text(f"Request"), maximum_tokens=64
-            )
+        >>> request = CompletionRequest(prompt=Prompt.from_text(f"Request"))
         >>> client = Client(token=os.environ["AA_TOKEN"])
         >>> response: CompletionResponse = client.complete(request, "luminous-base")
         """
diff --git a/aleph_alpha_client/completion.py b/aleph_alpha_client/completion.py
index a98981b..9950f74 100644
--- a/aleph_alpha_client/completion.py
+++ b/aleph_alpha_client/completion.py
@@ -179,7 +179,7 @@ class CompletionRequest:
 
     Examples:
         >>> prompt = Prompt.from_text("Provide a short description of AI:")
-        >>> request = CompletionRequest(prompt=prompt, maximum_tokens=20)
+        >>> request = CompletionRequest(prompt=prompt)
     """
 
     prompt: Prompt
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 250c7a4..51c2ad9 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -20,7 +20,7 @@ Synchronous client.
 
     client = Client(token=os.getenv("AA_TOKEN"))
     prompt = Prompt.from_text("Provide a short description of AI:")
-    request = CompletionRequest(prompt=prompt, maximum_tokens=20)
+    request = CompletionRequest(prompt=prompt)
     result = client.complete(request, model="luminous-extended")
     print(result.completions[0].completion)
 
@@ -36,7 +36,7 @@ Synchronous client with prompt containing an image.
     image = Image.from_file("path-to-an-image")
     prompt_template = PromptTemplate("{{image}}This picture shows ")
     prompt = prompt_template.to_prompt(image=prompt_template.placeholder(image))
-    request = CompletionRequest(prompt=prompt, maximum_tokens=20)
+    request = CompletionRequest(prompt=prompt)
     result = client.complete(request, model="luminous-extended")
     print(result.completions[0].completion)
 
@@ -51,10 +51,7 @@ Asynchronous client.
 
     # Can enter context manager within an async function
    async with AsyncClient(token=os.environ["AA_TOKEN"]) as client:
-        request = CompletionRequest(
-            prompt=Prompt.from_text("Request"),
-            maximum_tokens=64,
-        )
+        request = CompletionRequest(prompt=Prompt.from_text("Request"))
         response = await client.complete(request, model="luminous-base")
 
 .. toctree::