From 372bb0591e0054282790042c975d10de9e82908b Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Mon, 18 Mar 2024 21:51:18 +0000 Subject: [PATCH] SDK regeneration --- .github/workflows/tests.yml | 41 ++ poetry.lock | 45 +- pyproject.toml | 12 +- src/cohere/__init__.py | 4 +- src/cohere/base_client.py | 480 ++++++++++++++---- src/cohere/connectors/client.py | 48 +- src/cohere/core/client_wrapper.py | 2 +- src/cohere/core/http_client.py | 11 +- src/cohere/core/jsonable_encoder.py | 4 +- src/cohere/datasets/client.py | 66 ++- src/cohere/embed_jobs/client.py | 32 +- src/cohere/environment.py | 2 +- src/cohere/models/client.py | 8 +- .../types/chat_citation_generation_event.py | 1 + src/cohere/types/chat_data_metrics.py | 1 + .../chat_search_queries_generation_event.py | 1 + src/cohere/types/chat_search_results_event.py | 1 + src/cohere/types/chat_stream_end_event.py | 1 + src/cohere/types/chat_stream_start_event.py | 1 + .../types/chat_text_generation_event.py | 1 + .../types/chat_tool_calls_generation_event.py | 1 + src/cohere/types/classify_data_metrics.py | 1 + src/cohere/types/dataset.py | 1 + .../embed_by_type_response_embeddings.py | 1 + src/cohere/types/embed_response.py | 2 + src/cohere/types/finetune_dataset_metrics.py | 1 + src/cohere/types/generate_stream_end.py | 1 + src/cohere/types/generate_stream_error.py | 1 + src/cohere/types/generate_stream_text.py | 1 + .../types/generate_streamed_response.py | 3 + src/cohere/types/label_metric.py | 1 + src/cohere/types/reranker_data_metrics.py | 1 + src/cohere/types/streamed_chat_response.py | 7 + 33 files changed, 607 insertions(+), 177 deletions(-) create mode 100644 .github/workflows/tests.yml diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 000000000..4dd36af6f --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,41 @@ +name: Test SDK + +on: [push] +jobs: + compile: + runs-on: ubuntu-20.04 + steps: + - name: Checkout repo + uses: actions/checkout@v3 + - name: Set up python + uses: actions/setup-python@v4 + with: + python-version: 3.8 + - name: Bootstrap poetry + run: | + curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1 + - name: Install dependencies + run: poetry install + - name: Compile + run: poetry run mypy . + test: + runs-on: ubuntu-20.04 + steps: + - name: Checkout repo + uses: actions/checkout@v3 + - name: Set up python + uses: actions/setup-python@v4 + with: + python-version: 3.8 + - name: Bootstrap poetry + run: | + curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1 + - name: Install dependencies + run: poetry install + + - name: Install Fern + run: npm install -g fern-api + + - name: Test + run: | + poetry run pytest . 
diff --git a/poetry.lock b/poetry.lock index 41674a6a7..a40aeadcb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -366,6 +366,49 @@ tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +[[package]] +name = "pytest-asyncio" +version = "0.23.5.post1" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-asyncio-0.23.5.post1.tar.gz", hash = "sha256:b9a8806bea78c21276bc34321bbf234ba1b2ea5b30d9f0ce0f2dea45e4685813"}, + {file = "pytest_asyncio-0.23.5.post1-py3-none-any.whl", hash = "sha256:30f54d27774e79ac409778889880242b0403d09cabd65b727ce90fe92dd5d80e"}, +] + +[package.dependencies] +pytest = ">=7.0.0,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + [[package]] name = "sniffio" version = "1.3.1" @@ -402,4 +445,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "3c8fae8de68e5484c48073bf191e51acbe3b9a32fd98e6b5e4d165e42a7fc7aa" +content-hash = "9265776813ab39c8f5195d29e52270d0eaaeaef435e30903307bd1b52dd0eb95" diff --git a/pyproject.toml b/pyproject.toml index a7702a368..db3d0326c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "cohere" -version = "5.0.0a10" +version = "5.0.0a11" description = "" readme = "README.md" authors = [] @@ -17,6 +17,16 @@ typing_extensions = ">= 4.0.0" [tool.poetry.dev-dependencies] mypy = "^1.8.0" pytest = "^7.4.0" +pytest-asyncio = "^0.23.5" +python-dateutil = "^2.9.0" + +[tool.pytest.ini_options] +testpaths = [ "tests" ] +asyncio_mode = "auto" + +[tool.mypy] +plugins = ["pydantic.mypy"] + [build-system] requires = ["poetry-core"] diff --git a/src/cohere/__init__.py b/src/cohere/__init__.py index 52b625575..5e859592a 100644 --- a/src/cohere/__init__.py +++ b/src/cohere/__init__.py @@ -124,7 +124,7 @@ from .client import AsyncClient, Client from .datasets import DatasetsCreateResponse, DatasetsGetResponse, DatasetsGetUsageResponse, DatasetsListResponse from .embed_jobs import CreateEmbedJobRequestTruncate -from .environment import CohereEnvironment +from .environment import ClientEnvironment __all__ = [ "ApiMeta", @@ -169,7 +169,7 @@ "ClassifyResponseClassificationsItemClassificationType", "ClassifyResponseClassificationsItemLabelsValue", "Client", - "CohereEnvironment", + "ClientEnvironment", "CompatibleEndpoint", 
"Connector", "ConnectorAuthStatus", diff --git a/src/cohere/base_client.py b/src/cohere/base_client.py index 7bf5fbde9..3c49a9202 100644 --- a/src/cohere/base_client.py +++ b/src/cohere/base_client.py @@ -16,7 +16,7 @@ from .core.request_options import RequestOptions from .datasets.client import AsyncDatasetsClient, DatasetsClient from .embed_jobs.client import AsyncEmbedJobsClient, EmbedJobsClient -from .environment import CohereEnvironment +from .environment import ClientEnvironment from .errors.bad_request_error import BadRequestError from .errors.internal_server_error import InternalServerError from .errors.too_many_requests_error import TooManyRequestsError @@ -69,9 +69,9 @@ class BaseCohere: Parameters: - base_url: typing.Optional[str]. The base url to use for requests from the client. - - environment: CohereEnvironment. The environment to use for requests from the client. from .environment import CohereEnvironment + - environment: ClientEnvironment. The environment to use for requests from the client. from .environment import ClientEnvironment - Defaults to CohereEnvironment.PRODUCTION + Defaults to ClientEnvironment.PRODUCTION - client_name: typing.Optional[str]. @@ -81,9 +81,9 @@ class BaseCohere: - httpx_client: typing.Optional[httpx.Client]. The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. --- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -93,7 +93,7 @@ def __init__( self, *, base_url: typing.Optional[str] = None, - environment: CohereEnvironment = CohereEnvironment.PRODUCTION, + environment: ClientEnvironment = ClientEnvironment.PRODUCTION, client_name: typing.Optional[str] = None, token: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("CO_API_KEY"), timeout: typing.Optional[float] = 60, @@ -152,6 +152,10 @@ def chat_stream( - chat_history: typing.Optional[typing.Sequence[ChatMessage]]. A list of previous messages between the user and the model, giving the model conversational context for responding to the user's `message`. + Each item represents a single message in the chat history, excluding the current user turn. It has two properties: `role` and `message`. The `role` identifies the sender (`CHATBOT`, `SYSTEM`, or `USER`), while the `message` contains the text content. + + The chat_history parameter should not be used for `SYSTEM` messages in most cases. Instead, to add a `SYSTEM` role message at the beginning of a conversation, the `preamble` parameter should be used. + - conversation_id: typing.Optional[str]. An alternative to `chat_history`. Providing a `conversation_id` creates or resumes a persisted conversation with the specified ID. The ID can be any non empty string. @@ -218,20 +222,20 @@ def chat_stream( - tools: typing.Optional[typing.Sequence[Tool]]. A list of available tools (functions) that the model may suggest invoking before producing a text response. - When `tools` is passed, The `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made - the `tool_calls` array will be empty. + When `tools` is passed (without `tool_results`), the `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. 
If no calls need to be made, the `tool_calls` array will be empty. - - tool_results: typing.Optional[typing.Sequence[ChatStreamRequestToolResultsItem]]. A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to generate text and will be referenced in citations. When using `tool_results`, `tools` must be passed as well. + - tool_results: typing.Optional[typing.Sequence[ChatStreamRequestToolResultsItem]]. A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to produce a text response and will be referenced in citations. When using `tool_results`, `tools` must be passed as well. Each tool_result contains information about how it was invoked, as well as a list of outputs in the form of dictionaries. + **Note**: `outputs` must be a list of objects. If your tool returns a single object (eg `{"status": 200}`), make sure to wrap it in a list. ``` tool_results = [ { "call": { - "name": <tool name>, - "parameters": { - <param name>: <param value> - } + "name": <tool name>, + "parameters": { + <param name>: <param value> + } }, "outputs": [{ <key>: <value> @@ -243,6 +247,86 @@ def chat_stream( **Note**: Chat calls with `tool_results` should not be included in the Chat history to avoid duplication of the message text. - request_options: typing.Optional[RequestOptions]. Request-specific configuration. + --- + from cohere import ( + ChatConnector, + ChatMessage, + ChatStreamRequestConnectorsSearchOptions, + ChatStreamRequestPromptOverride, + ChatStreamRequestToolResultsItem, + Tool, + ToolCall, + ToolParameterDefinitionsValue, + ) + from cohere.client import Client + + client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + client.chat_stream( + message="string", + model="string", + preamble="string", + chat_history=[ + ChatMessage( + role="CHATBOT", + message="string", + ) + ], + conversation_id="string", + prompt_truncation="OFF", + connectors=[ + ChatConnector( + id="string", + user_access_token="string", + continue_on_failure=True, + options={"string": {"key": "value"}}, + ) + ], + search_queries_only=True, + documents=[{"string": "string"}], + citation_quality="fast", + temperature=1.1, + max_tokens=1, + k=1, + p=1.1, + seed=1.1, + connectors_search_options=ChatStreamRequestConnectorsSearchOptions( + model={"key": "value"}, + temperature={"key": "value"}, + max_tokens={"key": "value"}, + preamble={"key": "value"}, + seed=1.1, + ), + prompt_override=ChatStreamRequestPromptOverride( + preamble={"key": "value"}, + task_description={"key": "value"}, + style_guide={"key": "value"}, + ), + frequency_penalty=1.1, + presence_penalty=1.1, + raw_prompting=True, + tools=[ + Tool( + name="string", + description="string", + parameter_definitions={ + "string": ToolParameterDefinitionsValue( + description="string", + type="string", + required=True, + ) + }, + ) + ], + tool_results=[ + ChatStreamRequestToolResultsItem( + call=ToolCall(), + outputs=[{"string": {"key": "value"}}], + ) + ], + ) """ _request: typing.Dict[str, typing.Any] = {"message": message, "stream": True} if model is not OMIT: @@ -360,6 +444,10 @@ def chat( - chat_history: typing.Optional[typing.Sequence[ChatMessage]]. A list of previous messages between the user and the model, giving the model conversational context for responding to the user's `message`. + Each item represents a single message in the chat history, excluding the current user turn. It has two properties: `role` and `message`. The `role` identifies the sender (`CHATBOT`, `SYSTEM`, or `USER`), while the `message` contains the text content.
+ + The chat_history parameter should not be used for `SYSTEM` messages in most cases. Instead, to add a `SYSTEM` role message at the beginning of a conversation, the `preamble` parameter should be used. + - conversation_id: typing.Optional[str]. An alternative to `chat_history`. Providing a `conversation_id` creates or resumes a persisted conversation with the specified ID. The ID can be any non empty string. @@ -426,20 +514,20 @@ def chat( - tools: typing.Optional[typing.Sequence[Tool]]. A list of available tools (functions) that the model may suggest invoking before producing a text response. - When `tools` is passed, The `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made - the `tool_calls` array will be empty. + When `tools` is passed (without `tool_results`), the `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. - - tool_results: typing.Optional[typing.Sequence[ChatRequestToolResultsItem]]. A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to generate text and will be referenced in citations. When using `tool_results`, `tools` must be passed as well. + - tool_results: typing.Optional[typing.Sequence[ChatRequestToolResultsItem]]. A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to produce a text response and will be referenced in citations. When using `tool_results`, `tools` must be passed as well. Each tool_result contains information about how it was invoked, as well as a list of outputs in the form of dictionaries. + **Note**: `outputs` must be a list of objects. If your tool returns a single object (eg `{"status": 200}`), make sure to wrap it in a list. ``` tool_results = [ { "call": { - "name": <tool name>, - "parameters": { - <param name>: <param value> - } + "name": <tool name>, + "parameters": { + <param name>: <param value> + } }, "outputs": [{ <key>: <value> @@ -453,9 +541,9 @@ def chat( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- from cohere import ChatMessage - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -626,6 +714,31 @@ def generate_stream( - raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing. - request_options: typing.Optional[RequestOptions]. Request-specific configuration. + --- + from cohere.client import Client + + client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + client.generate_stream( + prompt="string", + model="string", + num_generations=1, + max_tokens=1, + truncate="NONE", + temperature=1.1, + seed=1.1, + preset="string", + end_sequences=["string"], + stop_sequences=["string"], + k=1, + p=1.1, + frequency_penalty=1.1, + presence_penalty=1.1, + return_likelihoods="GENERATION", + raw_prompting=True, + ) """ _request: typing.Dict[str, typing.Any] = {"prompt": prompt, "stream": True} if model is not OMIT: @@ -782,9 +895,9 @@ def generate( - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
--- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -911,6 +1024,20 @@ def embed( If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. - request_options: typing.Optional[RequestOptions]. Request-specific configuration. + --- + from cohere.client import Client + + client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + client.embed( + texts=["string"], + model="string", + input_type="search_document", + embedding_types=["float"], + truncate="NONE", + ) """ _request: typing.Dict[str, typing.Any] = {"texts": texts} if model is not OMIT: @@ -994,16 +1121,21 @@ def rerank( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) client.rerank( model="rerank-english-v2.0", query="What is the capital of the United States?", - documents=[], + documents=[ + "Carson City is the capital city of the American state of Nevada.", + "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean. Its capital is Saipan.", + "Washington, D.C. (also known as simply Washington or D.C., and officially as the District of Columbia) is the capital of the United States. It is a federal district.", + "Capital punishment (the death penalty) has existed in the United States since before the United States was a country. As of 2017, capital punishment is legal in 30 of the 50 states.", + ], ) """ _request: typing.Dict[str, typing.Any] = {"query": query, "documents": documents} @@ -1081,9 +1213,9 @@ def classify( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- from cohere import ClassifyExample - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -1194,36 +1326,38 @@ def summarize( request_options: typing.Optional[RequestOptions] = None, ) -> SummarizeResponse: """ - > 🚧 Warning - > - > This API is marked as "Legacy" and is no longer maintained. Follow the [migration guide](/docs/migrating-from-cogenerate-to-cochat) to start using the Chat API. - - Generates a summary in English for a given text. + > 🚧 Warning + > + > This API is marked as "Legacy" and is no longer maintained. Follow the [migration guide](/docs/migrating-from-cogenerate-to-cochat) to start using the Chat API. - Parameters: - - text: str. The text to generate a summary for. Can be up to 100,000 characters long. Currently the only supported language is English. + Generates a summary in English for a given text. - - length: typing.Optional[SummarizeRequestLength]. One of `short`, `medium`, `long`, or `auto` defaults to `auto`. Indicates the approximate length of the summary. If `auto` is selected, the best option will be picked based on the input text. + Parameters: + - text: str. The text to generate a summary for. Can be up to 100,000 characters long. Currently the only supported language is English. - - format: typing.Optional[SummarizeRequestFormat]. One of `paragraph`, `bullets`, or `auto`, defaults to `auto`. Indicates the style in which the summary will be delivered - in a free form paragraph or in bullet points. If `auto` is selected, the best option will be picked based on the input text.
+ - length: typing.Optional[SummarizeRequestLength]. One of `short`, `medium`, `long`, or `auto`, defaults to `auto`. Indicates the approximate length of the summary. If `auto` is selected, the best option will be picked based on the input text. - - model: typing.Optional[str]. The identifier of the model to generate the summary with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). Smaller, "light" models are faster, while larger models will perform better. + - format: typing.Optional[SummarizeRequestFormat]. One of `paragraph`, `bullets`, or `auto`, defaults to `auto`. Indicates the style in which the summary will be delivered - in a free form paragraph or in bullet points. If `auto` is selected, the best option will be picked based on the input text. - - extractiveness: typing.Optional[SummarizeRequestExtractiveness]. One of `low`, `medium`, `high`, or `auto`, defaults to `auto`. Controls how close to the original text the summary is. `high` extractiveness summaries will lean towards reusing sentences verbatim, while `low` extractiveness summaries will tend to paraphrase more. If `auto` is selected, the best option will be picked based on the input text. + - model: typing.Optional[str]. The identifier of the model to generate the summary with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). Smaller, "light" models are faster, while larger models will perform better. - - temperature: typing.Optional[float]. Ranges from 0 to 5. Controls the randomness of the output. Lower values tend to generate more “predictable” output, while higher values tend to generate more “creative” output. The sweet spot is typically between 0 and 1. + - extractiveness: typing.Optional[SummarizeRequestExtractiveness]. One of `low`, `medium`, `high`, or `auto`, defaults to `auto`. Controls how close to the original text the summary is. `high` extractiveness summaries will lean towards reusing sentences verbatim, while `low` extractiveness summaries will tend to paraphrase more. If `auto` is selected, the best option will be picked based on the input text. - - additional_command: typing.Optional[str]. A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". Eg. "focusing on the next steps" or "written by Yoda" + - temperature: typing.Optional[float]. Ranges from 0 to 5. Controls the randomness of the output. Lower values tend to generate more “predictable” output, while higher values tend to generate more “creative” output. The sweet spot is typically between 0 and 1. - - request_options: typing.Optional[RequestOptions]. Request-specific configuration. - --- - from cohere.client import Cohere + - additional_command: typing.Optional[str]. A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". Eg. "focusing on the next steps" or "written by Yoda" - client = Cohere(client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) - client.summarize(text='Ice cream is a sweetened frozen food typically eaten as a snack or dessert. It may be made from milk or cream and is flavoured with a sweetener, either sugar or an alternative, and a spice, such as cocoa or vanilla, or with fruit such as strawberries or peaches. It can also be made by whisking a flavored cream base and liquid nitrogen together.
Food coloring is sometimes added, in addition to stabilizers. The mixture is cooled below the freezing point of water and stirred to incorporate air spaces and to prevent detectable ice crystals from forming. The result is a smooth, semi-solid foam that is solid at very low temperatures (below 2 °C or 35 °F). It becomes more malleable as its temperature increases. + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. + --- + from cohere.client import Client - The meaning of the name "ice cream" varies from one country to another. In some countries, such as the United States, "ice cream" applies only to a specific variety, and most governments regulate the commercial use of the various terms according to the relative quantities of the main ingredients, notably the amount of cream. Products that do not meet the criteria to be called ice cream are sometimes labelled "frozen dairy dessert" instead. In other countries, such as Italy and Argentina, one word is used fo - all variants. Analogues made from dairy alternatives, such as goat"s or sheep"s milk, or milk substitutes (e.g., soy, cashew, coconut, almond milk or tofu), are available for those who are lactose intolerant, allergic to dairy protein or vegan.', ) + client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + client.summarize( + text='Ice cream is a sweetened frozen food typically eaten as a snack or dessert. It may be made from milk or cream and is flavoured with a sweetener, either sugar or an alternative, and a spice, such as cocoa or vanilla, or with fruit such as strawberries or peaches. It can also be made by whisking a flavored cream base and liquid nitrogen together. Food coloring is sometimes added, in addition to stabilizers. The mixture is cooled below the freezing point of water and stirred to incorporate air spaces and to prevent detectable ice crystals from forming. The result is a smooth, semi-solid foam that is solid at very low temperatures (below 2 °C or 35 °F). It becomes more malleable as its temperature increases.\n\nThe meaning of the name "ice cream" varies from one country to another. In some countries, such as the United States, "ice cream" applies only to a specific variety, and most governments regulate the commercial use of the various terms according to the relative quantities of the main ingredients, notably the amount of cream. Products that do not meet the criteria to be called ice cream are sometimes labelled "frozen dairy dessert" instead. In other countries, such as Italy and Argentina, one word is used for all variants. Analogues made from dairy alternatives, such as goat\'s or sheep\'s milk, or milk substitutes (e.g., soy, cashew, coconut, almond milk or tofu), are available for those who are lactose intolerant, allergic to dairy protein or vegan.', + ) """ _request: typing.Dict[str, typing.Any] = {"text": text} if length is not OMIT: @@ -1287,9 +1421,9 @@ def tokenize( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -1358,9 +1492,9 @@ def detokenize( - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
--- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -1415,9 +1549,9 @@ class AsyncBaseCohere: Parameters: - base_url: typing.Optional[str]. The base url to use for requests from the client. - - environment: CohereEnvironment. The environment to use for requests from the client. from .environment import CohereEnvironment + - environment: ClientEnvironment. The environment to use for requests from the client. from .environment import ClientEnvironment - Defaults to CohereEnvironment.PRODUCTION + Defaults to ClientEnvironment.PRODUCTION - client_name: typing.Optional[str]. @@ -1427,9 +1561,9 @@ class AsyncBaseCohere: - httpx_client: typing.Optional[httpx.AsyncClient]. The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. --- - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -1439,7 +1573,7 @@ def __init__( self, *, base_url: typing.Optional[str] = None, - environment: CohereEnvironment = CohereEnvironment.PRODUCTION, + environment: ClientEnvironment = ClientEnvironment.PRODUCTION, client_name: typing.Optional[str] = None, token: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("CO_API_KEY"), timeout: typing.Optional[float] = 60, @@ -1498,6 +1632,10 @@ async def chat_stream( - chat_history: typing.Optional[typing.Sequence[ChatMessage]]. A list of previous messages between the user and the model, giving the model conversational context for responding to the user's `message`. + Each item represents a single message in the chat history, excluding the current user turn. It has two properties: `role` and `message`. The `role` identifies the sender (`CHATBOT`, `SYSTEM`, or `USER`), while the `message` contains the text content. + + The chat_history parameter should not be used for `SYSTEM` messages in most cases. Instead, to add a `SYSTEM` role message at the beginning of a conversation, the `preamble` parameter should be used. + - conversation_id: typing.Optional[str]. An alternative to `chat_history`. Providing a `conversation_id` creates or resumes a persisted conversation with the specified ID. The ID can be any non empty string. @@ -1564,20 +1702,20 @@ async def chat_stream( - tools: typing.Optional[typing.Sequence[Tool]]. A list of available tools (functions) that the model may suggest invoking before producing a text response. - When `tools` is passed, The `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made - the `tool_calls` array will be empty. + When `tools` is passed (without `tool_results`), the `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. - - tool_results: typing.Optional[typing.Sequence[ChatStreamRequestToolResultsItem]]. A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to generate text and will be referenced in citations. When using `tool_results`, `tools` must be passed as well. 
+ - tool_results: typing.Optional[typing.Sequence[ChatStreamRequestToolResultsItem]]. A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to produce a text response and will be referenced in citations. When using `tool_results`, `tools` must be passed as well. Each tool_result contains information about how it was invoked, as well as a list of outputs in the form of dictionaries. + **Note**: `outputs` must be a list of objects. If your tool returns a single object (eg `{"status": 200}`), make sure to wrap it in a list. ``` tool_results = [ { "call": { - "name": <tool name>, - "parameters": { - <param name>: <param value> - } + "name": <tool name>, + "parameters": { + <param name>: <param value> + } }, "outputs": [{ <key>: <value> @@ -1589,6 +1727,86 @@ async def chat_stream( **Note**: Chat calls with `tool_results` should not be included in the Chat history to avoid duplication of the message text. - request_options: typing.Optional[RequestOptions]. Request-specific configuration. + --- + from cohere import ( + ChatConnector, + ChatMessage, + ChatStreamRequestConnectorsSearchOptions, + ChatStreamRequestPromptOverride, + ChatStreamRequestToolResultsItem, + Tool, + ToolCall, + ToolParameterDefinitionsValue, + ) + from cohere.client import AsyncClient + + client = AsyncClient( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + await client.chat_stream( + message="string", + model="string", + preamble="string", + chat_history=[ + ChatMessage( + role="CHATBOT", + message="string", + ) + ], + conversation_id="string", + prompt_truncation="OFF", + connectors=[ + ChatConnector( + id="string", + user_access_token="string", + continue_on_failure=True, + options={"string": {"key": "value"}}, + ) + ], + search_queries_only=True, + documents=[{"string": "string"}], + citation_quality="fast", + temperature=1.1, + max_tokens=1, + k=1, + p=1.1, + seed=1.1, + connectors_search_options=ChatStreamRequestConnectorsSearchOptions( + model={"key": "value"}, + temperature={"key": "value"}, + max_tokens={"key": "value"}, + preamble={"key": "value"}, + seed=1.1, + ), + prompt_override=ChatStreamRequestPromptOverride( + preamble={"key": "value"}, + task_description={"key": "value"}, + style_guide={"key": "value"}, + ), + frequency_penalty=1.1, + presence_penalty=1.1, + raw_prompting=True, + tools=[ + Tool( + name="string", + description="string", + parameter_definitions={ + "string": ToolParameterDefinitionsValue( + description="string", + type="string", + required=True, + ) + }, + ) + ], + tool_results=[ + ChatStreamRequestToolResultsItem( + call=ToolCall(), + outputs=[{"string": {"key": "value"}}], + ) + ], + ) """ _request: typing.Dict[str, typing.Any] = {"message": message, "stream": True} if model is not OMIT: @@ -1706,6 +1924,10 @@ async def chat( - chat_history: typing.Optional[typing.Sequence[ChatMessage]]. A list of previous messages between the user and the model, giving the model conversational context for responding to the user's `message`. + Each item represents a single message in the chat history, excluding the current user turn. It has two properties: `role` and `message`. The `role` identifies the sender (`CHATBOT`, `SYSTEM`, or `USER`), while the `message` contains the text content. + + The chat_history parameter should not be used for `SYSTEM` messages in most cases. Instead, to add a `SYSTEM` role message at the beginning of a conversation, the `preamble` parameter should be used. + - conversation_id: typing.Optional[str]. An alternative to `chat_history`.
Providing a `conversation_id` creates or resumes a persisted conversation with the specified ID. The ID can be any non empty string. @@ -1772,20 +1994,20 @@ async def chat( - tools: typing.Optional[typing.Sequence[Tool]]. A list of available tools (functions) that the model may suggest invoking before producing a text response. - When `tools` is passed, The `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made - the `tool_calls` array will be empty. + When `tools` is passed (without `tool_results`), the `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty. - - tool_results: typing.Optional[typing.Sequence[ChatRequestToolResultsItem]]. A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to generate text and will be referenced in citations. When using `tool_results`, `tools` must be passed as well. + - tool_results: typing.Optional[typing.Sequence[ChatRequestToolResultsItem]]. A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to produce a text response and will be referenced in citations. When using `tool_results`, `tools` must be passed as well. Each tool_result contains information about how it was invoked, as well as a list of outputs in the form of dictionaries. + **Note**: `outputs` must be a list of objects. If your tool returns a single object (eg `{"status": 200}`), make sure to wrap it in a list. ``` tool_results = [ { "call": { - "name": <tool name>, - "parameters": { - <param name>: <param value> - } + "name": <tool name>, + "parameters": { + <param name>: <param value> + } }, "outputs": [{ <key>: <value> @@ -1799,9 +2021,9 @@ async def chat( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- from cohere import ChatMessage - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -1972,6 +2194,31 @@ async def generate_stream( - raw_prompting: typing.Optional[bool]. When enabled, the user's prompt will be sent to the model without any pre-processing. - request_options: typing.Optional[RequestOptions]. Request-specific configuration. + --- + from cohere.client import AsyncClient + + client = AsyncClient( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + await client.generate_stream( + prompt="string", + model="string", + num_generations=1, + max_tokens=1, + truncate="NONE", + temperature=1.1, + seed=1.1, + preset="string", + end_sequences=["string"], + stop_sequences=["string"], + k=1, + p=1.1, + frequency_penalty=1.1, + presence_penalty=1.1, + return_likelihoods="GENERATION", + raw_prompting=True, + ) """ _request: typing.Dict[str, typing.Any] = {"prompt": prompt, "stream": True} if model is not OMIT: @@ -2128,9 +2375,9 @@ async def generate( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -2257,6 +2504,20 @@ async def embed( If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. - request_options: typing.Optional[RequestOptions].
Request-specific configuration. + --- + from cohere.client import AsyncClient + + client = AsyncClient( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + await client.embed( + texts=["string"], + model="string", + input_type="search_document", + embedding_types=["float"], + truncate="NONE", + ) """ _request: typing.Dict[str, typing.Any] = {"texts": texts} if model is not OMIT: @@ -2340,16 +2601,21 @@ async def rerank( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) await client.rerank( model="rerank-english-v2.0", query="What is the capital of the United States?", - documents=[], + documents=[ + "Carson City is the capital city of the American state of Nevada.", + "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean. Its capital is Saipan.", + "Washington, D.C. (also known as simply Washington or D.C., and officially as the District of Columbia) is the capital of the United States. It is a federal district.", + "Capital punishment (the death penalty) has existed in the United States since before the United States was a country. As of 2017, capital punishment is legal in 30 of the 50 states.", + ], ) """ _request: typing.Dict[str, typing.Any] = {"query": query, "documents": documents} @@ -2427,9 +2693,9 @@ async def classify( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- from cohere import ClassifyExample - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -2540,36 +2806,38 @@ async def summarize( request_options: typing.Optional[RequestOptions] = None, ) -> SummarizeResponse: """ - > 🚧 Warning - > - > This API is marked as "Legacy" and is no longer maintained. Follow the [migration guide](/docs/migrating-from-cogenerate-to-cochat) to start using the Chat API. - - Generates a summary in English for a given text. + > 🚧 Warning + > + > This API is marked as "Legacy" and is no longer maintained. Follow the [migration guide](/docs/migrating-from-cogenerate-to-cochat) to start using the Chat API. - Parameters: - - text: str. The text to generate a summary for. Can be up to 100,000 characters long. Currently the only supported language is English. + Generates a summary in English for a given text. - - length: typing.Optional[SummarizeRequestLength]. One of `short`, `medium`, `long`, or `auto` defaults to `auto`. Indicates the approximate length of the summary. If `auto` is selected, the best option will be picked based on the input text. + Parameters: + - text: str. The text to generate a summary for. Can be up to 100,000 characters long. Currently the only supported language is English. - - format: typing.Optional[SummarizeRequestFormat]. One of `paragraph`, `bullets`, or `auto`, defaults to `auto`. Indicates the style in which the summary will be delivered - in a free form paragraph or in bullet points. If `auto` is selected, the best option will be picked based on the input text. + - length: typing.Optional[SummarizeRequestLength]. One of `short`, `medium`, `long`, or `auto`, defaults to `auto`. Indicates the approximate length of the summary. If `auto` is selected, the best option will be picked based on the input text. - - model: typing.Optional[str].
The identifier of the model to generate the summary with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). Smaller, "light" models are faster, while larger models will perform better. + - format: typing.Optional[SummarizeRequestFormat]. One of `paragraph`, `bullets`, or `auto`, defaults to `auto`. Indicates the style in which the summary will be delivered - in a free form paragraph or in bullet points. If `auto` is selected, the best option will be picked based on the input text. - - extractiveness: typing.Optional[SummarizeRequestExtractiveness]. One of `low`, `medium`, `high`, or `auto`, defaults to `auto`. Controls how close to the original text the summary is. `high` extractiveness summaries will lean towards reusing sentences verbatim, while `low` extractiveness summaries will tend to paraphrase more. If `auto` is selected, the best option will be picked based on the input text. + - model: typing.Optional[str]. The identifier of the model to generate the summary with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). Smaller, "light" models are faster, while larger models will perform better. - - temperature: typing.Optional[float]. Ranges from 0 to 5. Controls the randomness of the output. Lower values tend to generate more “predictable” output, while higher values tend to generate more “creative” output. The sweet spot is typically between 0 and 1. + - extractiveness: typing.Optional[SummarizeRequestExtractiveness]. One of `low`, `medium`, `high`, or `auto`, defaults to `auto`. Controls how close to the original text the summary is. `high` extractiveness summaries will lean towards reusing sentences verbatim, while `low` extractiveness summaries will tend to paraphrase more. If `auto` is selected, the best option will be picked based on the input text. - - additional_command: typing.Optional[str]. A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". Eg. "focusing on the next steps" or "written by Yoda" + - temperature: typing.Optional[float]. Ranges from 0 to 5. Controls the randomness of the output. Lower values tend to generate more “predictable” output, while higher values tend to generate more “creative” output. The sweet spot is typically between 0 and 1. - - request_options: typing.Optional[RequestOptions]. Request-specific configuration. - --- - from cohere.client import AsyncCohere + - additional_command: typing.Optional[str]. A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". Eg. "focusing on the next steps" or "written by Yoda" - client = AsyncCohere(client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) - await client.summarize(text='Ice cream is a sweetened frozen food typically eaten as a snack or dessert. It may be made from milk or cream and is flavoured with a sweetener, either sugar or an alternative, and a spice, such as cocoa or vanilla, or with fruit such as strawberries or peaches. It can also be made by whisking a flavored cream base and liquid nitrogen together. Food coloring is sometimes added, in addition to stabilizers. The mixture is cooled below the freezing point of water and stirred to incorporate air spaces and to prevent detectable ice crystals from forming. 
The result is a smooth, semi-solid foam that is solid at very low temperatures (below 2 °C or 35 °F). It becomes more malleable as its temperature increases. + - request_options: typing.Optional[RequestOptions]. Request-specific configuration. + --- + from cohere.client import AsyncClient - The meaning of the name "ice cream" varies from one country to another. In some countries, such as the United States, "ice cream" applies only to a specific variety, and most governments regulate the commercial use of the various terms according to the relative quantities of the main ingredients, notably the amount of cream. Products that do not meet the criteria to be called ice cream are sometimes labelled "frozen dairy dessert" instead. In other countries, such as Italy and Argentina, one word is used fo - all variants. Analogues made from dairy alternatives, such as goat"s or sheep"s milk, or milk substitutes (e.g., soy, cashew, coconut, almond milk or tofu), are available for those who are lactose intolerant, allergic to dairy protein or vegan.', ) + client = AsyncClient( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + await client.summarize( + text='Ice cream is a sweetened frozen food typically eaten as a snack or dessert. It may be made from milk or cream and is flavoured with a sweetener, either sugar or an alternative, and a spice, such as cocoa or vanilla, or with fruit such as strawberries or peaches. It can also be made by whisking a flavored cream base and liquid nitrogen together. Food coloring is sometimes added, in addition to stabilizers. The mixture is cooled below the freezing point of water and stirred to incorporate air spaces and to prevent detectable ice crystals from forming. The result is a smooth, semi-solid foam that is solid at very low temperatures (below 2 °C or 35 °F). It becomes more malleable as its temperature increases.\n\nThe meaning of the name "ice cream" varies from one country to another. In some countries, such as the United States, "ice cream" applies only to a specific variety, and most governments regulate the commercial use of the various terms according to the relative quantities of the main ingredients, notably the amount of cream. Products that do not meet the criteria to be called ice cream are sometimes labelled "frozen dairy dessert" instead. In other countries, such as Italy and Argentina, one word is used for all variants. Analogues made from dairy alternatives, such as goat\'s or sheep\'s milk, or milk substitutes (e.g., soy, cashew, coconut, almond milk or tofu), are available for those who are lactose intolerant, allergic to dairy protein or vegan.', + ) """ _request: typing.Dict[str, typing.Any] = {"text": text} if length is not OMIT: @@ -2633,9 +2901,9 @@ async def tokenize( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -2704,9 +2972,9 @@ async def detokenize( - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
--- - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -2754,7 +3022,7 @@ async def detokenize( raise ApiError(status_code=_response.status_code, body=_response_json) -def _get_base_url(*, base_url: typing.Optional[str] = None, environment: CohereEnvironment) -> str: +def _get_base_url(*, base_url: typing.Optional[str] = None, environment: ClientEnvironment) -> str: if base_url is not None: return base_url elif environment is not None: diff --git a/src/cohere/connectors/client.py b/src/cohere/connectors/client.py index fdd907262..49a75b71a 100644 --- a/src/cohere/connectors/client.py +++ b/src/cohere/connectors/client.py @@ -53,9 +53,9 @@ def list( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -140,9 +140,9 @@ def create( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -215,9 +215,9 @@ def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = Non - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -270,9 +270,9 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -353,9 +353,9 @@ def update( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -439,9 +439,9 @@ def o_auth_authorize( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -521,9 +521,9 @@ async def list( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -608,9 +608,9 @@ async def create( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -683,9 +683,9 @@ async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
--- - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -740,9 +740,9 @@ async def delete( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -823,9 +823,9 @@ async def update( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -909,9 +909,9 @@ async def o_auth_authorize( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) diff --git a/src/cohere/core/client_wrapper.py b/src/cohere/core/client_wrapper.py index e8d288f5e..74ec55d2c 100644 --- a/src/cohere/core/client_wrapper.py +++ b/src/cohere/core/client_wrapper.py @@ -23,7 +23,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "cohere", - "X-Fern-SDK-Version": "5.0.0a10", + "X-Fern-SDK-Version": "5.0.0a11", } if self._client_name is not None: headers["X-Client-Name"] = self._client_name diff --git a/src/cohere/core/http_client.py b/src/cohere/core/http_client.py index fbbbc15ec..4e6877df2 100644 --- a/src/cohere/core/http_client.py +++ b/src/cohere/core/http_client.py @@ -5,6 +5,7 @@ import re import time import typing +from contextlib import asynccontextmanager, contextmanager from functools import wraps from random import random @@ -98,8 +99,10 @@ def request( return response @wraps(httpx.Client.stream) + @contextmanager def stream(self, *args: typing.Any, max_retries: int = 0, retries: int = 0, **kwargs: typing.Any) -> typing.Any: - return self.httpx_client.stream(*args, **kwargs) + with self.httpx_client.stream(*args, **kwargs) as stream: + yield stream class AsyncHttpClient: @@ -118,8 +121,10 @@ async def request( return await self.request(max_retries=max_retries, retries=retries + 1, *args, **kwargs) return response - @wraps(httpx.AsyncClient.request) + @wraps(httpx.AsyncClient.stream) + @asynccontextmanager async def stream( self, *args: typing.Any, max_retries: int = 0, retries: int = 0, **kwargs: typing.Any ) -> typing.Any: - return self.httpx_client.stream(*args, **kwargs) + async with self.httpx_client.stream(*args, **kwargs) as stream: + yield stream diff --git a/src/cohere/core/jsonable_encoder.py b/src/cohere/core/jsonable_encoder.py index 37238ab67..d359330cc 100644 --- a/src/cohere/core/jsonable_encoder.py +++ b/src/cohere/core/jsonable_encoder.py @@ -65,10 +65,10 @@ def jsonable_encoder(obj: Any, custom_encoder: Optional[Dict[Any, Callable[[Any] return str(obj) if isinstance(obj, (str, int, float, type(None))): return obj - if isinstance(obj, dt.date): - return str(obj) if isinstance(obj, dt.datetime): return serialize_datetime(obj) + if isinstance(obj, dt.date): + return str(obj) if isinstance(obj, dict): encoded_dict = {} allowed_keys = set(obj.keys()) diff --git a/src/cohere/datasets/client.py b/src/cohere/datasets/client.py index de15aee45..0d00da08b 
100644 --- a/src/cohere/datasets/client.py +++ b/src/cohere/datasets/client.py @@ -58,9 +58,9 @@ def list( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -149,6 +149,23 @@ def create( - eval_data: typing.Optional[core.File]. See core.File for more documentation - request_options: typing.Optional[RequestOptions]. Request-specific configuration. + --- + from cohere.client import Client + + client = Client( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + client.datasets.create( + name="string", + type="embed-input", + keep_original_file=True, + skip_malformed_input=True, + keep_fields="string", + optional_fields="string", + text_separator="string", + csv_delimiter="string", + ) """ _response = self._client_wrapper.httpx_client.request( "POST", @@ -210,9 +227,9 @@ def get_usage(self, *, request_options: typing.Optional[RequestOptions] = None) Parameters: - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -257,9 +274,9 @@ def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = Non - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -308,9 +325,9 @@ def delete( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import Cohere + from cohere.client import Client - client = Cohere( + client = Client( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -379,9 +396,9 @@ async def list( - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -470,6 +487,23 @@ async def create( - eval_data: typing.Optional[core.File]. See core.File for more documentation - request_options: typing.Optional[RequestOptions]. Request-specific configuration. + --- + from cohere.client import AsyncClient + + client = AsyncClient( + client_name="YOUR_CLIENT_NAME", + token="YOUR_TOKEN", + ) + await client.datasets.create( + name="string", + type="embed-input", + keep_original_file=True, + skip_malformed_input=True, + keep_fields="string", + optional_fields="string", + text_separator="string", + csv_delimiter="string", + ) """ _response = await self._client_wrapper.httpx_client.request( "POST", @@ -531,9 +565,9 @@ async def get_usage(self, *, request_options: typing.Optional[RequestOptions] = Parameters: - request_options: typing.Optional[RequestOptions]. Request-specific configuration. --- - from cohere.client import AsyncCohere + from cohere.client import AsyncClient - client = AsyncCohere( + client = AsyncClient( client_name="YOUR_CLIENT_NAME", token="YOUR_TOKEN", ) @@ -578,9 +612,9 @@ async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] - request_options: typing.Optional[RequestOptions]. Request-specific configuration. 
diff --git a/src/cohere/datasets/client.py b/src/cohere/datasets/client.py
index de15aee45..0d00da08b 100644
--- a/src/cohere/datasets/client.py
+++ b/src/cohere/datasets/client.py
@@ -58,9 +58,9 @@ def list(

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import Cohere
+        from cohere.client import Client

-        client = Cohere(
+        client = Client(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -149,6 +149,23 @@ def create(
             - eval_data: typing.Optional[core.File]. See core.File for more documentation

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from cohere.client import Client
+
+        client = Client(
+            client_name="YOUR_CLIENT_NAME",
+            token="YOUR_TOKEN",
+        )
+        client.datasets.create(
+            name="string",
+            type="embed-input",
+            keep_original_file=True,
+            skip_malformed_input=True,
+            keep_fields="string",
+            optional_fields="string",
+            text_separator="string",
+            csv_delimiter="string",
+        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
@@ -210,9 +227,9 @@ def get_usage(self, *, request_options: typing.Optional[RequestOptions] = None)
         Parameters:
             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import Cohere
+        from cohere.client import Client

-        client = Cohere(
+        client = Client(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -257,9 +274,9 @@ def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = Non

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import Cohere
+        from cohere.client import Client

-        client = Cohere(
+        client = Client(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -308,9 +325,9 @@ def delete(

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import Cohere
+        from cohere.client import Client

-        client = Cohere(
+        client = Client(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -379,9 +396,9 @@ async def list(

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import AsyncCohere
+        from cohere.client import AsyncClient

-        client = AsyncCohere(
+        client = AsyncClient(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -470,6 +487,23 @@ async def create(
             - eval_data: typing.Optional[core.File]. See core.File for more documentation

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from cohere.client import AsyncClient
+
+        client = AsyncClient(
+            client_name="YOUR_CLIENT_NAME",
+            token="YOUR_TOKEN",
+        )
+        await client.datasets.create(
+            name="string",
+            type="embed-input",
+            keep_original_file=True,
+            skip_malformed_input=True,
+            keep_fields="string",
+            optional_fields="string",
+            text_separator="string",
+            csv_delimiter="string",
+        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
@@ -531,9 +565,9 @@ async def get_usage(self, *, request_options: typing.Optional[RequestOptions] =
         Parameters:
             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import AsyncCohere
+        from cohere.client import AsyncClient

-        client = AsyncCohere(
+        client = AsyncClient(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -578,9 +612,9 @@ async def get(self, id: str, *, request_options: typing.Optional[RequestOptions]

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import AsyncCohere
+        from cohere.client import AsyncClient

-        client = AsyncCohere(
+        client = AsyncClient(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -629,9 +663,9 @@ async def delete(

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import AsyncCohere
+        from cohere.client import AsyncClient

-        client = AsyncCohere(
+        client = AsyncClient(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
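The new `datasets.create` docstring examples above only pass scalar options. Uploading an actual file presumably goes through the `core.File` parameters the docstring mentions (`eval_data`, and by symmetry a main data argument). A hedged sketch of that usage; the `data=` keyword and the file shown here are assumptions, not part of this diff:

from cohere.client import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)

# Assumed usage: the diff only documents `eval_data: typing.Optional[core.File]`,
# so the `data=` keyword below is hypothetical.
with open("embed_input.jsonl", "rb") as f:
    client.datasets.create(
        name="my-dataset",
        type="embed-input",
        data=f,
    )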
diff --git a/src/cohere/embed_jobs/client.py b/src/cohere/embed_jobs/client.py
index d929bfa9f..21d49b448 100644
--- a/src/cohere/embed_jobs/client.py
+++ b/src/cohere/embed_jobs/client.py
@@ -39,9 +39,9 @@ def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> Li
         Parameters:
             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import Cohere
+        from cohere.client import Client

-        client = Cohere(
+        client = Client(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -116,9 +116,9 @@ def create(

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import Cohere
+        from cohere.client import Client

-        client = Cohere(
+        client = Client(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -182,9 +182,9 @@ def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = Non

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import Cohere
+        from cohere.client import Client

-        client = Cohere(
+        client = Client(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -237,9 +237,9 @@ def cancel(self, id: str, *, request_options: typing.Optional[RequestOptions] =

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import Cohere
+        from cohere.client import Client

-        client = Cohere(
+        client = Client(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -300,9 +300,9 @@ async def list(self, *, request_options: typing.Optional[RequestOptions] = None)
         Parameters:
             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import AsyncCohere
+        from cohere.client import AsyncClient

-        client = AsyncCohere(
+        client = AsyncClient(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -377,9 +377,9 @@ async def create(

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import AsyncCohere
+        from cohere.client import AsyncClient

-        client = AsyncCohere(
+        client = AsyncClient(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -443,9 +443,9 @@ async def get(self, id: str, *, request_options: typing.Optional[RequestOptions]

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import AsyncCohere
+        from cohere.client import AsyncClient

-        client = AsyncCohere(
+        client = AsyncClient(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -498,9 +498,9 @@ async def cancel(self, id: str, *, request_options: typing.Optio

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import AsyncCohere
+        from cohere.client import AsyncClient

-        client = AsyncCohere(
+        client = AsyncClient(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
diff --git a/src/cohere/environment.py b/src/cohere/environment.py
index 75680a822..195c6937f 100644
--- a/src/cohere/environment.py
+++ b/src/cohere/environment.py
@@ -3,5 +3,5 @@
 import enum


-class CohereEnvironment(enum.Enum):
+class ClientEnvironment(enum.Enum):
     PRODUCTION = "https://api.cohere.ai/v1"
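The environment.py rename above matches the `CohereEnvironment` to `ClientEnvironment` export change in `src/cohere/__init__.py`. Assuming the generated clients accept an `environment=` keyword, as Fern SDKs typically do (it is not shown in this diff), caller code would migrate like this:

from cohere import ClientEnvironment
from cohere.client import Client

# Hypothetical: the `environment=` keyword is assumed, not shown in this patch.
client = Client(
    token="YOUR_TOKEN",
    environment=ClientEnvironment.PRODUCTION,  # "https://api.cohere.ai/v1"
)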
diff --git a/src/cohere/models/client.py b/src/cohere/models/client.py
index 4e0a3a06e..a451b3a0f 100644
--- a/src/cohere/models/client.py
+++ b/src/cohere/models/client.py
@@ -43,9 +43,9 @@ def list(

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import Cohere
+        from cohere.client import Client

-        client = Cohere(
+        client = Client(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
@@ -117,9 +117,9 @@ async def list(

             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from cohere.client import AsyncCohere
+        from cohere.client import AsyncClient

-        client = AsyncCohere(
+        client = AsyncClient(
             client_name="YOUR_CLIENT_NAME",
             token="YOUR_TOKEN",
         )
diff --git a/src/cohere/types/chat_citation_generation_event.py b/src/cohere/types/chat_citation_generation_event.py
index c868eab44..421709db8 100644
--- a/src/cohere/types/chat_citation_generation_event.py
+++ b/src/cohere/types/chat_citation_generation_event.py
@@ -31,5 +31,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/chat_data_metrics.py b/src/cohere/types/chat_data_metrics.py
index eec8f4ffc..2754a4280 100644
--- a/src/cohere/types/chat_data_metrics.py
+++ b/src/cohere/types/chat_data_metrics.py
@@ -39,5 +39,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/chat_search_queries_generation_event.py b/src/cohere/types/chat_search_queries_generation_event.py
index b11e6197b..2728f76c9 100644
--- a/src/cohere/types/chat_search_queries_generation_event.py
+++ b/src/cohere/types/chat_search_queries_generation_event.py
@@ -31,5 +31,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/chat_search_results_event.py b/src/cohere/types/chat_search_results_event.py
index 0bde952af..cc92979f1 100644
--- a/src/cohere/types/chat_search_results_event.py
+++ b/src/cohere/types/chat_search_results_event.py
@@ -37,5 +37,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/chat_stream_end_event.py b/src/cohere/types/chat_stream_end_event.py
index 6fccd4efd..24bd66134 100644
--- a/src/cohere/types/chat_stream_end_event.py
+++ b/src/cohere/types/chat_stream_end_event.py
@@ -41,5 +41,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/chat_stream_start_event.py b/src/cohere/types/chat_stream_start_event.py
index 8652e436a..38e3d495a 100644
--- a/src/cohere/types/chat_stream_start_event.py
+++ b/src/cohere/types/chat_stream_start_event.py
@@ -30,5 +30,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/chat_text_generation_event.py b/src/cohere/types/chat_text_generation_event.py
index b344b5913..d3b29b53c 100644
--- a/src/cohere/types/chat_text_generation_event.py
+++ b/src/cohere/types/chat_text_generation_event.py
@@ -30,5 +30,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/chat_tool_calls_generation_event.py b/src/cohere/types/chat_tool_calls_generation_event.py
index b3e641531..b726cd09a 100644
--- a/src/cohere/types/chat_tool_calls_generation_event.py
+++ b/src/cohere/types/chat_tool_calls_generation_event.py
@@ -28,5 +28,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/classify_data_metrics.py b/src/cohere/types/classify_data_metrics.py
index fa5955eb1..a9fdfb9cc 100644
--- a/src/cohere/types/classify_data_metrics.py
+++ b/src/cohere/types/classify_data_metrics.py
@@ -27,5 +27,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/dataset.py b/src/cohere/types/dataset.py
index 814d37ea2..b54f6a898 100644
--- a/src/cohere/types/dataset.py
+++ b/src/cohere/types/dataset.py
@@ -71,5 +71,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/embed_by_type_response_embeddings.py b/src/cohere/types/embed_by_type_response_embeddings.py
index 062cbc084..eebfd183d 100644
--- a/src/cohere/types/embed_by_type_response_embeddings.py
+++ b/src/cohere/types/embed_by_type_response_embeddings.py
@@ -53,5 +53,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/embed_response.py b/src/cohere/types/embed_response.py
index 17e111a06..ea82e2460 100644
--- a/src/cohere/types/embed_response.py
+++ b/src/cohere/types/embed_response.py
@@ -15,6 +15,7 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True


 class EmbedResponse_EmbeddingsByType(EmbedByTypeResponse):
@@ -24,6 +25,7 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True


 EmbedResponse = typing.Union[EmbedResponse_EmbeddingsFloats, EmbedResponse_EmbeddingsByType]
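The repeated one-line Config addition in the type files above (and in the remaining ones below) sets both pydantic v1's `allow_population_by_field_name` and `populate_by_name`, the pydantic v2 spelling of the same option, so the generated models accept either a field's Python name or its alias regardless of which pydantic line is installed; v1 simply ignores the unknown v2 key. A minimal sketch of what the flag enables (the model and alias here are invented for illustration):

import typing

import pydantic  # v1-style API, matching the Config blocks in this patch


class Citation(pydantic.BaseModel):
    # Invented model/alias purely to illustrate the flag.
    document_ids: typing.List[str] = pydantic.Field(alias="documentIds")

    class Config:
        allow_population_by_field_name = True  # pydantic v1 name
        populate_by_name = True  # pydantic v2 spelling of the same option


# Both the alias and the Python field name are accepted:
print(Citation(documentIds=["doc-1"]).document_ids)
print(Citation(document_ids=["doc-1"]).document_ids)  # would fail without the flag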
diff --git a/src/cohere/types/finetune_dataset_metrics.py b/src/cohere/types/finetune_dataset_metrics.py
index 5518e718c..05c7391e2 100644
--- a/src/cohere/types/finetune_dataset_metrics.py
+++ b/src/cohere/types/finetune_dataset_metrics.py
@@ -54,5 +54,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/generate_stream_end.py b/src/cohere/types/generate_stream_end.py
index 16c6d6c33..b014ef360 100644
--- a/src/cohere/types/generate_stream_end.py
+++ b/src/cohere/types/generate_stream_end.py
@@ -31,5 +31,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/generate_stream_error.py b/src/cohere/types/generate_stream_error.py
index 44b2c44a1..45ab2db44 100644
--- a/src/cohere/types/generate_stream_error.py
+++ b/src/cohere/types/generate_stream_error.py
@@ -38,5 +38,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/generate_stream_text.py b/src/cohere/types/generate_stream_text.py
index 441986738..0907cdc8d 100644
--- a/src/cohere/types/generate_stream_text.py
+++ b/src/cohere/types/generate_stream_text.py
@@ -37,5 +37,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/generate_streamed_response.py b/src/cohere/types/generate_streamed_response.py
index 53eb80102..6c20aa8fe 100644
--- a/src/cohere/types/generate_streamed_response.py
+++ b/src/cohere/types/generate_streamed_response.py
@@ -16,6 +16,7 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True


 class GenerateStreamedResponse_StreamEnd(GenerateStreamEnd):
@@ -25,6+26,7 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True


 class GenerateStreamedResponse_StreamError(GenerateStreamError):
@@ -34,6 +36,7 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True


 GenerateStreamedResponse = typing.Union[
diff --git a/src/cohere/types/label_metric.py b/src/cohere/types/label_metric.py
index 20504631a..f9edc886a 100644
--- a/src/cohere/types/label_metric.py
+++ b/src/cohere/types/label_metric.py
@@ -39,5 +39,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/cohere/types/reranker_data_metrics.py b/src/cohere/types/reranker_data_metrics.py
index c116467ff..a0ab206f6 100644
--- a/src/cohere/types/reranker_data_metrics.py
+++ b/src/cohere/types/reranker_data_metrics.py
@@ -54,5 +54,6 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
         extra = pydantic.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
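generate_streamed_response.py above and streamed_chat_response.py below expose each stream event as a subclass and publish the stream type as a `typing.Union`; with `smart_union = True`, pydantic validates against the member whose fields actually match instead of coercing to the first candidate. A toy reconstruction of that mechanism (event names and fields are illustrative, not the SDK's):

import typing

import pydantic  # v1-style API


class StreamText(pydantic.BaseModel):
    event_type: typing.Literal["text-generation"]
    text: str


class StreamEnd(pydantic.BaseModel):
    event_type: typing.Literal["stream-end"]
    finish_reason: str


class Envelope(pydantic.BaseModel):
    event: typing.Union[StreamText, StreamEnd]

    class Config:
        smart_union = True  # try exact matches instead of first-coercible


raw = {"event": {"event_type": "stream-end", "finish_reason": "COMPLETE"}}
print(type(Envelope.parse_obj(raw).event).__name__)  # -> StreamEnd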
diff --git a/src/cohere/types/streamed_chat_response.py b/src/cohere/types/streamed_chat_response.py
index e7ff572ce..e99b351a7 100644
--- a/src/cohere/types/streamed_chat_response.py
+++ b/src/cohere/types/streamed_chat_response.py
@@ -20,6 +20,7 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True


 class StreamedChatResponse_SearchQueriesGeneration(ChatSearchQueriesGenerationEvent):
@@ -29,6 +30,7 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True


 class StreamedChatResponse_SearchResults(ChatSearchResultsEvent):
@@ -38,6 +40,7 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True


 class StreamedChatResponse_TextGeneration(ChatTextGenerationEvent):
@@ -47,6 +50,7 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True


 class StreamedChatResponse_CitationGeneration(ChatCitationGenerationEvent):
@@ -56,6 +60,7 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True


 class StreamedChatResponse_ToolCallsGeneration(ChatToolCallsGenerationEvent):
@@ -65,6 +70,7 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True


 class StreamedChatResponse_StreamEnd(ChatStreamEndEvent):
@@ -74,6 +80,7 @@ class Config:
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True


 StreamedChatResponse = typing.Union[
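End-to-end, the regenerated event models combine with the fixed stream wrappers roughly as follows when consuming a chat stream. This is a hedged sketch: the `chat_stream` method, its `message=` parameter, and the event attributes are assumed from the surrounding SDK and are not part of this diff:

from cohere.client import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)

# Assumed method name and parameters; this diff only shows the event models.
for event in client.chat_stream(message="Tell me a joke"):
    if event.event_type == "text-generation":
        print(event.text, end="")
    elif event.event_type == "stream-end":
        print()
        print("finish reason:", event.finish_reason)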