From ed4edd272a8b21a0a7300a8d20823683e475e44f Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Thu, 19 Sep 2024 15:56:59 +0000
Subject: [PATCH 1/2] SDK regeneration
---
pyproject.toml | 2 +-
reference.md | 82 +++++++++-
src/cohere/__init__.py | 20 +--
src/cohere/core/client_wrapper.py | 2 +-
src/cohere/finetuning/__init__.py | 2 +
src/cohere/finetuning/finetuning/__init__.py | 2 +
.../finetuning/finetuning/types/__init__.py | 2 +
.../finetuning/finetuning/types/base_model.py | 2 +-
.../finetuning/types/hyperparameters.py | 18 +++
.../finetuning/types/lora_target_modules.py | 13 ++
src/cohere/types/__init__.py | 28 +---
src/cohere/types/embed_request_v2.py | 107 -------------
src/cohere/types/images.py | 50 -------
src/cohere/types/texts.py | 62 --------
src/cohere/types/texts_truncate.py | 5 -
src/cohere/v2/__init__.py | 2 +
src/cohere/v2/client.py | 140 +++++++++++++++---
src/cohere/v2/types/__init__.py | 2 +
.../v2/types/v2embed_request_truncate.py | 5 +
19 files changed, 258 insertions(+), 288 deletions(-)
create mode 100644 src/cohere/finetuning/finetuning/types/lora_target_modules.py
delete mode 100644 src/cohere/types/embed_request_v2.py
delete mode 100644 src/cohere/types/images.py
delete mode 100644 src/cohere/types/texts.py
delete mode 100644 src/cohere/types/texts_truncate.py
create mode 100644 src/cohere/v2/types/v2embed_request_truncate.py
diff --git a/pyproject.toml b/pyproject.toml
index cd1405841..973e48de1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "cohere"
-version = "5.9.3"
+version = "5.9.4"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index c91398c36..95ebe505b 100644
--- a/reference.md
+++ b/reference.md
@@ -2858,17 +2858,14 @@ If you want to learn more how to use the embedding model, have a look at the [Se
```python
-from cohere import Client, ImageEmbedRequestV2
+from cohere import Client
client = Client(
client_name="YOUR_CLIENT_NAME",
token="YOUR_TOKEN",
)
client.v2.embed(
- request=ImageEmbedRequestV2(
- images=["string"],
- model="string",
- ),
+ model="model",
)
```
@@ -2885,7 +2882,80 @@ client.v2.embed(
-
-**request:** `EmbedRequestV2`
+**model:** `str`
+
+Defaults to embed-english-v2.0
+
+The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
+
+Available models and corresponding embedding dimensions:
+
+* `embed-english-v3.0` 1024
+* `embed-multilingual-v3.0` 1024
+* `embed-english-light-v3.0` 384
+* `embed-multilingual-light-v3.0` 384
+
+* `embed-english-v2.0` 4096
+* `embed-english-light-v2.0` 1024
+* `embed-multilingual-v2.0` 768
+
+
+
+
+
+-
+
+**texts:** `typing.Optional[typing.Sequence[str]]` — An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.
+
+
+
+
+
+-
+
+**images:** `typing.Optional[typing.Sequence[str]]`
+
+An array of image data URIs for the model to embed. Maximum number of images per call is `1`.
+
+The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg` or `image/png` format and must have a maximum size of 5MB.
+
+
+
+
+
+-
+
+**input_type:** `typing.Optional[EmbedInputType]`
+
+
+
+
+
+-
+
+**embedding_types:** `typing.Optional[typing.Sequence[EmbeddingType]]`
+
+Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types.
+
+* `"float"`: Use this when you want to get back the default float embeddings. Valid for all models.
+* `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models.
+* `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models.
+* `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models.
+* `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.
+
+
+
+
+
+-
+
+**truncate:** `typing.Optional[V2EmbedRequestTruncate]`
+
+One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
+
+Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
+
+If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
diff --git a/src/cohere/__init__.py b/src/cohere/__init__.py
index dd374d7d6..eef186d88 100644
--- a/src/cohere/__init__.py
+++ b/src/cohere/__init__.py
@@ -80,7 +80,6 @@
CitationStartEventDelta,
CitationStartEventDeltaMessage,
CitationStartStreamedChatResponseV2,
- ClassificationEmbedRequestV2,
ClassifyDataMetrics,
ClassifyExample,
ClassifyRequestTruncate,
@@ -89,7 +88,6 @@
ClassifyResponseClassificationsItemClassificationType,
ClassifyResponseClassificationsItemLabelsValue,
ClientClosedRequestErrorBody,
- ClusteringEmbedRequestV2,
CompatibleEndpoint,
Connector,
ConnectorAuthStatus,
@@ -120,7 +118,6 @@
EmbedJobStatus,
EmbedJobTruncate,
EmbedRequestTruncate,
- EmbedRequestV2,
EmbedResponse,
EmbeddingType,
EmbeddingsByTypeEmbedResponse,
@@ -141,8 +138,6 @@
Generation,
GetConnectorResponse,
GetModelResponse,
- ImageEmbedRequestV2,
- Images,
JsonObjectResponseFormat,
JsonObjectResponseFormatV2,
JsonResponseFormat,
@@ -169,9 +164,7 @@
RerankerDataMetrics,
ResponseFormat,
ResponseFormatV2,
- SearchDocumentEmbedRequestV2,
SearchQueriesGenerationStreamedChatResponse,
- SearchQueryEmbedRequestV2,
SearchResultsStreamedChatResponse,
SingleGeneration,
SingleGenerationInStream,
@@ -200,8 +193,6 @@
TextResponseFormatV2,
TextSystemMessageContentItem,
TextToolContent,
- Texts,
- TextsTruncate,
TokenizeResponse,
TooManyRequestsErrorBody,
Tool,
@@ -267,6 +258,7 @@
V2ChatRequestSafetyMode,
V2ChatStreamRequestDocumentsItem,
V2ChatStreamRequestSafetyMode,
+ V2EmbedRequestTruncate,
V2RerankRequestDocumentsItem,
V2RerankResponse,
V2RerankResponseResultsItem,
@@ -359,7 +351,6 @@
"CitationStartEventDelta",
"CitationStartEventDeltaMessage",
"CitationStartStreamedChatResponseV2",
- "ClassificationEmbedRequestV2",
"ClassifyDataMetrics",
"ClassifyExample",
"ClassifyRequestTruncate",
@@ -372,7 +363,6 @@
"ClientClosedRequestErrorBody",
"ClientEnvironment",
"ClientV2",
- "ClusteringEmbedRequestV2",
"CompatibleEndpoint",
"Connector",
"ConnectorAuthStatus",
@@ -409,7 +399,6 @@
"EmbedJobStatus",
"EmbedJobTruncate",
"EmbedRequestTruncate",
- "EmbedRequestV2",
"EmbedResponse",
"EmbeddingType",
"EmbeddingsByTypeEmbedResponse",
@@ -432,8 +421,6 @@
"Generation",
"GetConnectorResponse",
"GetModelResponse",
- "ImageEmbedRequestV2",
- "Images",
"InternalServerError",
"JsonObjectResponseFormat",
"JsonObjectResponseFormatV2",
@@ -464,9 +451,7 @@
"ResponseFormat",
"ResponseFormatV2",
"SagemakerClient",
- "SearchDocumentEmbedRequestV2",
"SearchQueriesGenerationStreamedChatResponse",
- "SearchQueryEmbedRequestV2",
"SearchResultsStreamedChatResponse",
"ServiceUnavailableError",
"SingleGeneration",
@@ -496,8 +481,6 @@
"TextResponseFormatV2",
"TextSystemMessageContentItem",
"TextToolContent",
- "Texts",
- "TextsTruncate",
"TokenizeResponse",
"TooManyRequestsError",
"TooManyRequestsErrorBody",
@@ -536,6 +519,7 @@
"V2ChatRequestSafetyMode",
"V2ChatStreamRequestDocumentsItem",
"V2ChatStreamRequestSafetyMode",
+ "V2EmbedRequestTruncate",
"V2RerankRequestDocumentsItem",
"V2RerankResponse",
"V2RerankResponseResultsItem",
diff --git a/src/cohere/core/client_wrapper.py b/src/cohere/core/client_wrapper.py
index 10e94aea9..7d323ab41 100644
--- a/src/cohere/core/client_wrapper.py
+++ b/src/cohere/core/client_wrapper.py
@@ -24,7 +24,7 @@ def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "cohere",
- "X-Fern-SDK-Version": "5.9.3",
+ "X-Fern-SDK-Version": "5.9.4",
}
if self._client_name is not None:
headers["X-Client-Name"] = self._client_name
diff --git a/src/cohere/finetuning/__init__.py b/src/cohere/finetuning/__init__.py
index ea532e8b2..29eed472f 100644
--- a/src/cohere/finetuning/__init__.py
+++ b/src/cohere/finetuning/__init__.py
@@ -13,6 +13,7 @@
ListEventsResponse,
ListFinetunedModelsResponse,
ListTrainingStepMetricsResponse,
+ LoraTargetModules,
Settings,
Status,
Strategy,
@@ -33,6 +34,7 @@
"ListEventsResponse",
"ListFinetunedModelsResponse",
"ListTrainingStepMetricsResponse",
+ "LoraTargetModules",
"Settings",
"Status",
"Strategy",
diff --git a/src/cohere/finetuning/finetuning/__init__.py b/src/cohere/finetuning/finetuning/__init__.py
index 71e235731..a8c6b7817 100644
--- a/src/cohere/finetuning/finetuning/__init__.py
+++ b/src/cohere/finetuning/finetuning/__init__.py
@@ -12,6 +12,7 @@
ListEventsResponse,
ListFinetunedModelsResponse,
ListTrainingStepMetricsResponse,
+ LoraTargetModules,
Settings,
Status,
Strategy,
@@ -32,6 +33,7 @@
"ListEventsResponse",
"ListFinetunedModelsResponse",
"ListTrainingStepMetricsResponse",
+ "LoraTargetModules",
"Settings",
"Status",
"Strategy",
diff --git a/src/cohere/finetuning/finetuning/types/__init__.py b/src/cohere/finetuning/finetuning/types/__init__.py
index d573f6518..72f975c71 100644
--- a/src/cohere/finetuning/finetuning/types/__init__.py
+++ b/src/cohere/finetuning/finetuning/types/__init__.py
@@ -11,6 +11,7 @@
from .list_events_response import ListEventsResponse
from .list_finetuned_models_response import ListFinetunedModelsResponse
from .list_training_step_metrics_response import ListTrainingStepMetricsResponse
+from .lora_target_modules import LoraTargetModules
from .settings import Settings
from .status import Status
from .strategy import Strategy
@@ -30,6 +31,7 @@
"ListEventsResponse",
"ListFinetunedModelsResponse",
"ListTrainingStepMetricsResponse",
+ "LoraTargetModules",
"Settings",
"Status",
"Strategy",
diff --git a/src/cohere/finetuning/finetuning/types/base_model.py b/src/cohere/finetuning/finetuning/types/base_model.py
index 44ad40c31..fc993edb6 100644
--- a/src/cohere/finetuning/finetuning/types/base_model.py
+++ b/src/cohere/finetuning/finetuning/types/base_model.py
@@ -30,7 +30,7 @@ class BaseModel(UncheckedBaseModel):
strategy: typing.Optional[Strategy] = pydantic.Field(default=None)
"""
- The fine-tuning strategy.
+ Deprecated: The fine-tuning strategy.
"""
if IS_PYDANTIC_V2:
diff --git a/src/cohere/finetuning/finetuning/types/hyperparameters.py b/src/cohere/finetuning/finetuning/types/hyperparameters.py
index 229549b46..2185f4c20 100644
--- a/src/cohere/finetuning/finetuning/types/hyperparameters.py
+++ b/src/cohere/finetuning/finetuning/types/hyperparameters.py
@@ -3,6 +3,7 @@
from ....core.unchecked_base_model import UncheckedBaseModel
import typing
import pydantic
+from .lora_target_modules import LoraTargetModules
from ....core.pydantic_utilities import IS_PYDANTIC_V2
@@ -38,6 +39,23 @@ class Hyperparameters(UncheckedBaseModel):
The learning rate to be used during training.
"""
+ lora_alpha: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Controls the scaling factor for LoRA updates. Higher values make the
+ updates more impactful.
+ """
+
+ lora_rank: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Specifies the rank for low-rank matrices. Lower ranks reduce parameters
+ but may limit model flexibility.
+ """
+
+ lora_target_modules: typing.Optional[LoraTargetModules] = pydantic.Field(default=None)
+ """
+ The combination of LoRA modules to target.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/cohere/finetuning/finetuning/types/lora_target_modules.py b/src/cohere/finetuning/finetuning/types/lora_target_modules.py
new file mode 100644
index 000000000..773ec7a5c
--- /dev/null
+++ b/src/cohere/finetuning/finetuning/types/lora_target_modules.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LoraTargetModules = typing.Union[
+ typing.Literal[
+ "LORA_TARGET_MODULES_UNSPECIFIED",
+ "LORA_TARGET_MODULES_QV",
+ "LORA_TARGET_MODULES_QKVO",
+ "LORA_TARGET_MODULES_QKVO_FFN",
+ ],
+ typing.Any,
+]
diff --git a/src/cohere/types/__init__.py b/src/cohere/types/__init__.py
index 106547ce1..ab2914d92 100644
--- a/src/cohere/types/__init__.py
+++ b/src/cohere/types/__init__.py
@@ -110,6 +110,7 @@
from .detokenize_response import DetokenizeResponse
from .document import Document
from .document_content import DocumentContent
+from .document_source import DocumentSource
from .embed_by_type_response import EmbedByTypeResponse
from .embed_by_type_response_embeddings import EmbedByTypeResponseEmbeddings
from .embed_floats_response import EmbedFloatsResponse
@@ -118,14 +119,6 @@
from .embed_job_status import EmbedJobStatus
from .embed_job_truncate import EmbedJobTruncate
from .embed_request_truncate import EmbedRequestTruncate
-from .embed_request_v2 import (
- ClassificationEmbedRequestV2,
- ClusteringEmbedRequestV2,
- EmbedRequestV2,
- ImageEmbedRequestV2,
- SearchDocumentEmbedRequestV2,
- SearchQueryEmbedRequestV2,
-)
from .embed_response import EmbedResponse, EmbeddingsByTypeEmbedResponse, EmbeddingsFloatsEmbedResponse
from .embedding_type import EmbeddingType
from .finetune_dataset_metrics import FinetuneDatasetMetrics
@@ -149,7 +142,6 @@
from .generation import Generation
from .get_connector_response import GetConnectorResponse
from .get_model_response import GetModelResponse
-from .images import Images
from .json_response_format import JsonResponseFormat
from .json_response_format_v2 import JsonResponseFormatV2
from .label_metric import LabelMetric
@@ -205,10 +197,12 @@
from .summarize_request_format import SummarizeRequestFormat
from .summarize_request_length import SummarizeRequestLength
from .summarize_response import SummarizeResponse
+from .system_message import SystemMessage
from .system_message_content import SystemMessageContent
from .system_message_content_item import SystemMessageContentItem, TextSystemMessageContentItem
-from .texts import Texts
-from .texts_truncate import TextsTruncate
+from .text_content import TextContent
+from .text_response_format import TextResponseFormat
+from .text_response_format_v2 import TextResponseFormatV2
from .tokenize_response import TokenizeResponse
from .too_many_requests_error_body import TooManyRequestsErrorBody
from .tool import Tool
@@ -217,10 +211,12 @@
from .tool_call_v2 import ToolCallV2
from .tool_call_v2function import ToolCallV2Function
from .tool_content import DocumentToolContent, TextToolContent, ToolContent
+from .tool_message import ToolMessage
from .tool_message_v2 import ToolMessageV2
from .tool_message_v2tool_content import ToolMessageV2ToolContent
from .tool_parameter_definitions_value import ToolParameterDefinitionsValue
from .tool_result import ToolResult
+from .tool_source import ToolSource
from .tool_v2 import ToolV2
from .tool_v2function import ToolV2Function
from .unprocessable_entity_error_body import UnprocessableEntityErrorBody
@@ -228,6 +224,7 @@
from .usage import Usage
from .usage_billed_units import UsageBilledUnits
from .usage_tokens import UsageTokens
+from .user_message import UserMessage
from .user_message_content import UserMessageContent
__all__ = [
@@ -310,7 +307,6 @@
"CitationStartEventDelta",
"CitationStartEventDeltaMessage",
"CitationStartStreamedChatResponseV2",
- "ClassificationEmbedRequestV2",
"ClassifyDataMetrics",
"ClassifyExample",
"ClassifyRequestTruncate",
@@ -319,7 +315,6 @@
"ClassifyResponseClassificationsItemClassificationType",
"ClassifyResponseClassificationsItemLabelsValue",
"ClientClosedRequestErrorBody",
- "ClusteringEmbedRequestV2",
"CompatibleEndpoint",
"Connector",
"ConnectorAuthStatus",
@@ -350,7 +345,6 @@
"EmbedJobStatus",
"EmbedJobTruncate",
"EmbedRequestTruncate",
- "EmbedRequestV2",
"EmbedResponse",
"EmbeddingType",
"EmbeddingsByTypeEmbedResponse",
@@ -371,8 +365,6 @@
"Generation",
"GetConnectorResponse",
"GetModelResponse",
- "ImageEmbedRequestV2",
- "Images",
"JsonObjectResponseFormat",
"JsonObjectResponseFormatV2",
"JsonResponseFormat",
@@ -399,9 +391,7 @@
"RerankerDataMetrics",
"ResponseFormat",
"ResponseFormatV2",
- "SearchDocumentEmbedRequestV2",
"SearchQueriesGenerationStreamedChatResponse",
- "SearchQueryEmbedRequestV2",
"SearchResultsStreamedChatResponse",
"SingleGeneration",
"SingleGenerationInStream",
@@ -430,8 +420,6 @@
"TextResponseFormatV2",
"TextSystemMessageContentItem",
"TextToolContent",
- "Texts",
- "TextsTruncate",
"TokenizeResponse",
"TooManyRequestsErrorBody",
"Tool",
diff --git a/src/cohere/types/embed_request_v2.py b/src/cohere/types/embed_request_v2.py
deleted file mode 100644
index d5c36dbb5..000000000
--- a/src/cohere/types/embed_request_v2.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
-import typing
-from .embedding_type import EmbeddingType
-from .texts_truncate import TextsTruncate
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
-import typing_extensions
-from ..core.unchecked_base_model import UnionMetadata
-
-
-class SearchDocumentEmbedRequestV2(UncheckedBaseModel):
- input_type: typing.Literal["search_document"] = "search_document"
- texts: typing.List[str]
- model: str
- embedding_types: typing.Optional[typing.List[EmbeddingType]] = None
- truncate: typing.Optional[TextsTruncate] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class SearchQueryEmbedRequestV2(UncheckedBaseModel):
- input_type: typing.Literal["search_query"] = "search_query"
- texts: typing.List[str]
- model: str
- embedding_types: typing.Optional[typing.List[EmbeddingType]] = None
- truncate: typing.Optional[TextsTruncate] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class ClassificationEmbedRequestV2(UncheckedBaseModel):
- input_type: typing.Literal["classification"] = "classification"
- texts: typing.List[str]
- model: str
- embedding_types: typing.Optional[typing.List[EmbeddingType]] = None
- truncate: typing.Optional[TextsTruncate] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class ClusteringEmbedRequestV2(UncheckedBaseModel):
- input_type: typing.Literal["clustering"] = "clustering"
- texts: typing.List[str]
- model: str
- embedding_types: typing.Optional[typing.List[EmbeddingType]] = None
- truncate: typing.Optional[TextsTruncate] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-class ImageEmbedRequestV2(UncheckedBaseModel):
- input_type: typing.Literal["image"] = "image"
- images: typing.List[str]
- model: str
- embedding_types: typing.Optional[typing.List[EmbeddingType]] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
-
-
-EmbedRequestV2 = typing_extensions.Annotated[
- typing.Union[
- SearchDocumentEmbedRequestV2,
- SearchQueryEmbedRequestV2,
- ClassificationEmbedRequestV2,
- ClusteringEmbedRequestV2,
- ImageEmbedRequestV2,
- ],
- UnionMetadata(discriminant="input_type"),
-]
diff --git a/src/cohere/types/images.py b/src/cohere/types/images.py
deleted file mode 100644
index 5c829189a..000000000
--- a/src/cohere/types/images.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-import typing
-import pydantic
-from .embedding_type import EmbeddingType
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class Images(UncheckedBaseModel):
- images: typing.List[str] = pydantic.Field()
- """
- An array of image data URIs for the model to embed. Maximum number of images per call is `1`.
- The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg` or `image/png` format and has a maximum size of 5MB.
- """
-
- model: str = pydantic.Field()
- """
- Defaults to embed-english-v2.0
- The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
- Available models and corresponding embedding dimensions:
-
- - `embed-english-v3.0` 1024
- - `embed-multilingual-v3.0` 1024
- - `embed-english-light-v3.0` 384
- - `embed-multilingual-light-v3.0` 384
- - `embed-english-v2.0` 4096
- - `embed-english-light-v2.0` 1024
- - `embed-multilingual-v2.0` 768
- """
-
- embedding_types: typing.Optional[typing.List[EmbeddingType]] = pydantic.Field(default=None)
- """
- Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types.
-
- - `"float"`: Use this when you want to get back the default float embeddings. Valid for all models.
- - `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models.
- - `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models.
- - `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models.
- - `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/cohere/types/texts.py b/src/cohere/types/texts.py
deleted file mode 100644
index 2f2d0978a..000000000
--- a/src/cohere/types/texts.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-import typing
-import pydantic
-from .embedding_type import EmbeddingType
-from .texts_truncate import TextsTruncate
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class Texts(UncheckedBaseModel):
- texts: typing.List[str] = pydantic.Field()
- """
- An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.
- """
-
- model: str = pydantic.Field()
- """
- Defaults to embed-english-v2.0
-
- The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
-
- Available models and corresponding embedding dimensions:
-
- - `embed-english-v3.0` 1024
- - `embed-multilingual-v3.0` 1024
- - `embed-english-light-v3.0` 384
- - `embed-multilingual-light-v3.0` 384
-
- - `embed-english-v2.0` 4096
- - `embed-english-light-v2.0` 1024
- - `embed-multilingual-v2.0` 768
- """
-
- embedding_types: typing.Optional[typing.List[EmbeddingType]] = pydantic.Field(default=None)
- """
- Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types.
-
- - `"float"`: Use this when you want to get back the default float embeddings. Valid for all models.
- - `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models.
- - `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models.
- - `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models.
- - `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.
- """
-
- truncate: typing.Optional[TextsTruncate] = pydantic.Field(default=None)
- """
- One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
-
- Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
-
- If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/cohere/types/texts_truncate.py b/src/cohere/types/texts_truncate.py
deleted file mode 100644
index b0e2faf0e..000000000
--- a/src/cohere/types/texts_truncate.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-TextsTruncate = typing.Union[typing.Literal["NONE", "START", "END"], typing.Any]
diff --git a/src/cohere/v2/__init__.py b/src/cohere/v2/__init__.py
index 4a904803f..f84be0915 100644
--- a/src/cohere/v2/__init__.py
+++ b/src/cohere/v2/__init__.py
@@ -5,6 +5,7 @@
V2ChatRequestSafetyMode,
V2ChatStreamRequestDocumentsItem,
V2ChatStreamRequestSafetyMode,
+ V2EmbedRequestTruncate,
V2RerankRequestDocumentsItem,
V2RerankResponse,
V2RerankResponseResultsItem,
@@ -16,6 +17,7 @@
"V2ChatRequestSafetyMode",
"V2ChatStreamRequestDocumentsItem",
"V2ChatStreamRequestSafetyMode",
+ "V2EmbedRequestTruncate",
"V2RerankRequestDocumentsItem",
"V2RerankResponse",
"V2RerankResponseResultsItem",
diff --git a/src/cohere/v2/client.py b/src/cohere/v2/client.py
index b504acfb5..a462ca464 100644
--- a/src/cohere/v2/client.py
+++ b/src/cohere/v2/client.py
@@ -35,7 +35,9 @@
from .types.v2chat_request_documents_item import V2ChatRequestDocumentsItem
from .types.v2chat_request_safety_mode import V2ChatRequestSafetyMode
from ..types.chat_response import ChatResponse
-from ..types.embed_request_v2 import EmbedRequestV2
+from ..types.embed_input_type import EmbedInputType
+from ..types.embedding_type import EmbeddingType
+from .types.v2embed_request_truncate import V2EmbedRequestTruncate
from ..types.embed_by_type_response import EmbedByTypeResponse
from .types.v2rerank_request_documents_item import V2RerankRequestDocumentsItem
from .types.v2rerank_response import V2RerankResponse
@@ -663,7 +665,15 @@ def chat(
raise ApiError(status_code=_response.status_code, body=_response_json)
def embed(
- self, *, request: EmbedRequestV2, request_options: typing.Optional[RequestOptions] = None
+ self,
+ *,
+ model: str,
+ texts: typing.Optional[typing.Sequence[str]] = OMIT,
+ images: typing.Optional[typing.Sequence[str]] = OMIT,
+ input_type: typing.Optional[EmbedInputType] = OMIT,
+ embedding_types: typing.Optional[typing.Sequence[EmbeddingType]] = OMIT,
+ truncate: typing.Optional[V2EmbedRequestTruncate] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
) -> EmbedByTypeResponse:
"""
This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents.
@@ -674,7 +684,47 @@ def embed(
Parameters
----------
- request : EmbedRequestV2
+ model : str
+ Defaults to embed-english-v2.0
+
+ The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
+
+ Available models and corresponding embedding dimensions:
+
+ * `embed-english-v3.0` 1024
+ * `embed-multilingual-v3.0` 1024
+ * `embed-english-light-v3.0` 384
+ * `embed-multilingual-light-v3.0` 384
+
+ * `embed-english-v2.0` 4096
+ * `embed-english-light-v2.0` 1024
+ * `embed-multilingual-v2.0` 768
+
+ texts : typing.Optional[typing.Sequence[str]]
+ An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.
+
+ images : typing.Optional[typing.Sequence[str]]
+ An array of image data URIs for the model to embed. Maximum number of images per call is `1`.
+
+        The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg` or `image/png` format and must have a maximum size of 5MB.
+
+ input_type : typing.Optional[EmbedInputType]
+
+ embedding_types : typing.Optional[typing.Sequence[EmbeddingType]]
+ Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types.
+
+ * `"float"`: Use this when you want to get back the default float embeddings. Valid for all models.
+ * `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models.
+ * `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models.
+ * `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models.
+ * `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.
+
+ truncate : typing.Optional[V2EmbedRequestTruncate]
+ One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
+
+ Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
+
+ If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -686,23 +736,27 @@ def embed(
Examples
--------
- from cohere import Client, ImageEmbedRequestV2
+ from cohere import Client
client = Client(
client_name="YOUR_CLIENT_NAME",
token="YOUR_TOKEN",
)
client.v2.embed(
- request=ImageEmbedRequestV2(
- images=["string"],
- model="string",
- ),
+ model="model",
)
"""
_response = self._client_wrapper.httpx_client.request(
"v2/embed",
method="POST",
- json=convert_and_respect_annotation_metadata(object_=request, annotation=EmbedRequestV2, direction="write"),
+ json={
+ "texts": texts,
+ "images": images,
+ "model": model,
+ "input_type": input_type,
+ "embedding_types": embedding_types,
+ "truncate": truncate,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1672,7 +1726,15 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def embed(
- self, *, request: EmbedRequestV2, request_options: typing.Optional[RequestOptions] = None
+ self,
+ *,
+ model: str,
+ texts: typing.Optional[typing.Sequence[str]] = OMIT,
+ images: typing.Optional[typing.Sequence[str]] = OMIT,
+ input_type: typing.Optional[EmbedInputType] = OMIT,
+ embedding_types: typing.Optional[typing.Sequence[EmbeddingType]] = OMIT,
+ truncate: typing.Optional[V2EmbedRequestTruncate] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
) -> EmbedByTypeResponse:
"""
This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents.
@@ -1683,7 +1745,47 @@ async def embed(
Parameters
----------
- request : EmbedRequestV2
+ model : str
+ Defaults to embed-english-v2.0
+
+ The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
+
+ Available models and corresponding embedding dimensions:
+
+ * `embed-english-v3.0` 1024
+ * `embed-multilingual-v3.0` 1024
+ * `embed-english-light-v3.0` 384
+ * `embed-multilingual-light-v3.0` 384
+
+ * `embed-english-v2.0` 4096
+ * `embed-english-light-v2.0` 1024
+ * `embed-multilingual-v2.0` 768
+
+ texts : typing.Optional[typing.Sequence[str]]
+ An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.
+
+ images : typing.Optional[typing.Sequence[str]]
+ An array of image data URIs for the model to embed. Maximum number of images per call is `1`.
+
+     The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg` or `image/png` format and have a maximum size of 5MB.
+
+ input_type : typing.Optional[EmbedInputType]
+
+ embedding_types : typing.Optional[typing.Sequence[EmbeddingType]]
+     Specifies the types of embeddings you want to get back. Not required; defaults to `None`, which returns the Embed Floats response type. Can be one or more of the following types.
+
+ * `"float"`: Use this when you want to get back the default float embeddings. Valid for all models.
+ * `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models.
+ * `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models.
+ * `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models.
+ * `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.
+
+ truncate : typing.Optional[V2EmbedRequestTruncate]
+ One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
+
+ Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
+
+ If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1697,7 +1799,7 @@ async def embed(
--------
import asyncio
- from cohere import AsyncClient, ImageEmbedRequestV2
+ from cohere import AsyncClient
client = AsyncClient(
client_name="YOUR_CLIENT_NAME",
@@ -1707,10 +1809,7 @@ async def embed(
async def main() -> None:
await client.v2.embed(
- request=ImageEmbedRequestV2(
- images=["string"],
- model="string",
- ),
+ model="model",
)
@@ -1719,7 +1818,14 @@ async def main() -> None:
_response = await self._client_wrapper.httpx_client.request(
"v2/embed",
method="POST",
- json=convert_and_respect_annotation_metadata(object_=request, annotation=EmbedRequestV2, direction="write"),
+ json={
+ "texts": texts,
+ "images": images,
+ "model": model,
+ "input_type": input_type,
+ "embedding_types": embedding_types,
+ "truncate": truncate,
+ },
request_options=request_options,
omit=OMIT,
)
diff --git a/src/cohere/v2/types/__init__.py b/src/cohere/v2/types/__init__.py
index 529151db8..fff15903b 100644
--- a/src/cohere/v2/types/__init__.py
+++ b/src/cohere/v2/types/__init__.py
@@ -4,6 +4,7 @@
from .v2chat_request_safety_mode import V2ChatRequestSafetyMode
from .v2chat_stream_request_documents_item import V2ChatStreamRequestDocumentsItem
from .v2chat_stream_request_safety_mode import V2ChatStreamRequestSafetyMode
+from .v2embed_request_truncate import V2EmbedRequestTruncate
from .v2rerank_request_documents_item import V2RerankRequestDocumentsItem
from .v2rerank_response import V2RerankResponse
from .v2rerank_response_results_item import V2RerankResponseResultsItem
@@ -14,6 +15,7 @@
"V2ChatRequestSafetyMode",
"V2ChatStreamRequestDocumentsItem",
"V2ChatStreamRequestSafetyMode",
+ "V2EmbedRequestTruncate",
"V2RerankRequestDocumentsItem",
"V2RerankResponse",
"V2RerankResponseResultsItem",
diff --git a/src/cohere/v2/types/v2embed_request_truncate.py b/src/cohere/v2/types/v2embed_request_truncate.py
new file mode 100644
index 000000000..807e9939d
--- /dev/null
+++ b/src/cohere/v2/types/v2embed_request_truncate.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+V2EmbedRequestTruncate = typing.Union[typing.Literal["NONE", "START", "END"], typing.Any]
From 2b3b5053ffa1acb6b20e52b41a7d3a844c1c900d Mon Sep 17 00:00:00 2001
From: Billy Trend
Date: Thu, 19 Sep 2024 17:00:18 +0100
Subject: [PATCH 2/2] Fixes
---
src/cohere/types/__init__.py | 8 --------
1 file changed, 8 deletions(-)
diff --git a/src/cohere/types/__init__.py b/src/cohere/types/__init__.py
index ab2914d92..c52476d30 100644
--- a/src/cohere/types/__init__.py
+++ b/src/cohere/types/__init__.py
@@ -110,7 +110,6 @@
from .detokenize_response import DetokenizeResponse
from .document import Document
from .document_content import DocumentContent
-from .document_source import DocumentSource
from .embed_by_type_response import EmbedByTypeResponse
from .embed_by_type_response_embeddings import EmbedByTypeResponseEmbeddings
from .embed_floats_response import EmbedFloatsResponse
@@ -197,12 +196,8 @@
from .summarize_request_format import SummarizeRequestFormat
from .summarize_request_length import SummarizeRequestLength
from .summarize_response import SummarizeResponse
-from .system_message import SystemMessage
from .system_message_content import SystemMessageContent
from .system_message_content_item import SystemMessageContentItem, TextSystemMessageContentItem
-from .text_content import TextContent
-from .text_response_format import TextResponseFormat
-from .text_response_format_v2 import TextResponseFormatV2
from .tokenize_response import TokenizeResponse
from .too_many_requests_error_body import TooManyRequestsErrorBody
from .tool import Tool
@@ -211,12 +206,10 @@
from .tool_call_v2 import ToolCallV2
from .tool_call_v2function import ToolCallV2Function
from .tool_content import DocumentToolContent, TextToolContent, ToolContent
-from .tool_message import ToolMessage
from .tool_message_v2 import ToolMessageV2
from .tool_message_v2tool_content import ToolMessageV2ToolContent
from .tool_parameter_definitions_value import ToolParameterDefinitionsValue
from .tool_result import ToolResult
-from .tool_source import ToolSource
from .tool_v2 import ToolV2
from .tool_v2function import ToolV2Function
from .unprocessable_entity_error_body import UnprocessableEntityErrorBody
@@ -224,7 +217,6 @@
from .usage import Usage
from .usage_billed_units import UsageBilledUnits
from .usage_tokens import UsageTokens
-from .user_message import UserMessage
from .user_message_content import UserMessageContent
__all__ = [