SDK regeneration
fern-api[bot] committed Oct 30, 2024
1 parent f31b9a5 commit 18f8709
Showing 9 changed files with 79 additions and 27 deletions.
38 changes: 19 additions & 19 deletions poetry.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "cohere"
-version = "5.11.2"
+version = "5.11.3"
description = ""
readme = "README.md"
authors = []
4 changes: 2 additions & 2 deletions reference.md
@@ -2319,7 +2319,7 @@ client.check_api_key()
<dl>
<dd>

-Generates a message from the model in response to a provided conversation. To learn more about the features of the Chat API follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
+Generates a text response to a user message and streams it down, token by token. To learn how to use the Chat API with streaming follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).

Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2) for instructions on moving from API v1 to API v2.
</dd>
@@ -2599,7 +2599,7 @@ Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
<dl>
<dd>

-Generates a message from the model in response to a provided conversation. To learn more about the features of the Chat API follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
+Generates a text response to a user message and streams it down, token by token. To learn how to use the Chat API with streaming follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).

Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2) for instructions on moving from API v1 to API v2.
</dd>
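
For context, a minimal sketch of consuming the v2 streaming endpoint this docstring describes might look like the following; the model name, event types, and field paths are assumptions taken from the public Chat API docs, not from this diff.

```python
import cohere

# Hypothetical usage sketch: placeholder API key; event names follow the public v2 Chat API docs.
co = cohere.ClientV2(api_key="YOUR_API_KEY")

stream = co.chat_stream(
    model="command-r-plus",  # assumed model name
    messages=[{"role": "user", "content": "Write a one-line haiku about SDKs."}],
)

for event in stream:
    # Tokens arrive incrementally as "content-delta" events while the response is generated.
    if event.type == "content-delta":
        print(event.delta.message.content.text, end="", flush=True)
```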
4 changes: 4 additions & 0 deletions src/cohere/__init__.py
@@ -25,6 +25,7 @@
ChatContentStartEventDeltaMessage,
ChatContentStartEventDeltaMessageContent,
ChatDataMetrics,
ChatDebugEvent,
ChatDocument,
ChatFinishReason,
ChatMessage,
@@ -104,6 +105,7 @@
DatasetPart,
DatasetType,
DatasetValidationStatus,
DebugStreamedChatResponse,
DeleteConnectorResponse,
DetokenizeResponse,
Document,
@@ -297,6 +299,7 @@
"ChatContentStartEventDeltaMessage",
"ChatContentStartEventDeltaMessageContent",
"ChatDataMetrics",
"ChatDebugEvent",
"ChatDocument",
"ChatFinishReason",
"ChatMessage",
@@ -386,6 +389,7 @@
"DatasetsGetResponse",
"DatasetsGetUsageResponse",
"DatasetsListResponse",
"DebugStreamedChatResponse",
"DeleteConnectorResponse",
"DetokenizeResponse",
"Document",
2 changes: 1 addition & 1 deletion src/cohere/core/client_wrapper.py
@@ -24,7 +24,7 @@ def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "cohere",
"X-Fern-SDK-Version": "5.11.2",
"X-Fern-SDK-Version": "5.11.3",
}
if self._client_name is not None:
headers["X-Client-Name"] = self._client_name
12 changes: 12 additions & 0 deletions src/cohere/types/__init__.py
@@ -26,6 +26,7 @@
from .chat_content_start_event_delta_message import ChatContentStartEventDeltaMessage
from .chat_content_start_event_delta_message_content import ChatContentStartEventDeltaMessageContent
from .chat_data_metrics import ChatDataMetrics
from .chat_debug_event import ChatDebugEvent
from .chat_document import ChatDocument
from .chat_finish_reason import ChatFinishReason
from .chat_message import ChatMessage
@@ -110,6 +111,7 @@
from .detokenize_response import DetokenizeResponse
from .document import Document
from .document_content import DocumentContent
from .document_source import DocumentSource
from .embed_by_type_response import EmbedByTypeResponse
from .embed_by_type_response_embeddings import EmbedByTypeResponseEmbeddings
from .embed_floats_response import EmbedFloatsResponse
@@ -170,6 +172,7 @@
from .source import DocumentSource, Source, ToolSource
from .streamed_chat_response import (
CitationGenerationStreamedChatResponse,
DebugStreamedChatResponse,
SearchQueriesGenerationStreamedChatResponse,
SearchResultsStreamedChatResponse,
StreamEndStreamedChatResponse,
@@ -197,8 +200,12 @@
from .summarize_request_format import SummarizeRequestFormat
from .summarize_request_length import SummarizeRequestLength
from .summarize_response import SummarizeResponse
from .system_message import SystemMessage
from .system_message_content import SystemMessageContent
from .system_message_content_item import SystemMessageContentItem, TextSystemMessageContentItem
from .text_content import TextContent
from .text_response_format import TextResponseFormat
from .text_response_format_v2 import TextResponseFormatV2
from .tokenize_response import TokenizeResponse
from .too_many_requests_error_body import TooManyRequestsErrorBody
from .tool import Tool
@@ -207,17 +214,20 @@
from .tool_call_v2 import ToolCallV2
from .tool_call_v2function import ToolCallV2Function
from .tool_content import DocumentToolContent, TextToolContent, ToolContent
from .tool_message import ToolMessage
from .tool_message_v2 import ToolMessageV2
from .tool_message_v2content import ToolMessageV2Content
from .tool_parameter_definitions_value import ToolParameterDefinitionsValue
from .tool_result import ToolResult
from .tool_source import ToolSource
from .tool_v2 import ToolV2
from .tool_v2function import ToolV2Function
from .unprocessable_entity_error_body import UnprocessableEntityErrorBody
from .update_connector_response import UpdateConnectorResponse
from .usage import Usage
from .usage_billed_units import UsageBilledUnits
from .usage_tokens import UsageTokens
from .user_message import UserMessage
from .user_message_content import UserMessageContent

__all__ = [
@@ -245,6 +255,7 @@
"ChatContentStartEventDeltaMessage",
"ChatContentStartEventDeltaMessageContent",
"ChatDataMetrics",
"ChatDebugEvent",
"ChatDocument",
"ChatFinishReason",
"ChatMessage",
@@ -324,6 +335,7 @@
"DatasetPart",
"DatasetType",
"DatasetValidationStatus",
"DebugStreamedChatResponse",
"DeleteConnectorResponse",
"DetokenizeResponse",
"Document",
18 changes: 18 additions & 0 deletions src/cohere/types/chat_debug_event.py
@@ -0,0 +1,18 @@
# This file was auto-generated by Fern from our API Definition.

from .chat_stream_event import ChatStreamEvent
import typing
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic


class ChatDebugEvent(ChatStreamEvent):
prompt: typing.Optional[str] = None

if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow") # type: ignore # Pydantic v2
else:

class Config:
smart_union = True
extra = pydantic.Extra.allow
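
The generated model carries both a Pydantic v2 `model_config` and a legacy `Config` class so it works under either major version of Pydantic; in both branches unknown fields are allowed rather than rejected. A standalone sketch of that effect, independent of the SDK and using only the Pydantic v2 spelling:

```python
import typing

import pydantic  # this sketch assumes Pydantic v2 is installed


class DebugEventSketch(pydantic.BaseModel):
    # Mirrors the generated model's behaviour: unknown keys are kept, not rejected.
    model_config = pydantic.ConfigDict(extra="allow")

    prompt: typing.Optional[str] = None


event = DebugEventSketch.model_validate({"prompt": "rendered prompt", "trace_id": "abc123"})
print(event.prompt)       # rendered prompt
print(event.model_extra)  # {'trace_id': 'abc123'} -- extra field preserved
```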
18 changes: 18 additions & 0 deletions src/cohere/types/streamed_chat_response.py
@@ -157,6 +157,23 @@ class Config:
extra = pydantic.Extra.allow


class DebugStreamedChatResponse(UncheckedBaseModel):
"""
StreamedChatResponse is returned in streaming mode (specified with `stream=True` in the request).
"""

event_type: typing.Literal["debug"] = "debug"
prompt: typing.Optional[str] = None

if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow") # type: ignore # Pydantic v2
else:

class Config:
smart_union = True
extra = pydantic.Extra.allow


StreamedChatResponse = typing_extensions.Annotated[
typing.Union[
StreamStartStreamedChatResponse,
@@ -167,6 +184,7 @@ class Config:
ToolCallsGenerationStreamedChatResponse,
StreamEndStreamedChatResponse,
ToolCallsChunkStreamedChatResponse,
DebugStreamedChatResponse,
],
UnionMetadata(discriminant="event_type"),
]
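
With `DebugStreamedChatResponse` added to the discriminated union, code iterating over the v1 streaming endpoint can now receive events with `event_type == "debug"`. A hedged sketch of handling them; the `message`/`model` arguments and the other event types follow the existing v1 API, and debug events are assumed to appear only when the server emits them:

```python
import cohere

co = cohere.Client(api_key="YOUR_API_KEY")  # v1 client; placeholder key

for event in co.chat_stream(message="Hello there", model="command-r"):
    if event.event_type == "text-generation":
        print(event.text, end="", flush=True)
    elif event.event_type == "debug":
        # New in this release: the debug event may carry the rendered prompt.
        print("\n[debug] prompt:", event.prompt)
    elif event.event_type == "stream-end":
        print("\n[finish reason]", event.finish_reason)
```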
8 changes: 4 additions & 4 deletions src/cohere/v2/client.py
@@ -73,7 +73,7 @@ def chat_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[StreamedChatResponseV2]:
"""
-Generates a message from the model in response to a provided conversation. To learn more about the features of the Chat API follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
+Generates a text response to a user message and streams it down, token by token. To learn how to use the Chat API with streaming follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2) for instructions on moving from API v1 to API v2.
@@ -401,7 +401,7 @@ def chat(
request_options: typing.Optional[RequestOptions] = None,
) -> ChatResponse:
"""
-Generates a message from the model in response to a provided conversation. To learn more about the features of the Chat API follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
+Generates a text response to a user message and streams it down, token by token. To learn how to use the Chat API with streaming follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2) for instructions on moving from API v1 to API v2.
@@ -1124,7 +1124,7 @@ async def chat_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[StreamedChatResponseV2]:
"""
-Generates a message from the model in response to a provided conversation. To learn more about the features of the Chat API follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
+Generates a text response to a user message and streams it down, token by token. To learn how to use the Chat API with streaming follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2) for instructions on moving from API v1 to API v2.
@@ -1460,7 +1460,7 @@ async def chat(
request_options: typing.Optional[RequestOptions] = None,
) -> ChatResponse:
"""
-Generates a message from the model in response to a provided conversation. To learn more about the features of the Chat API follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
+Generates a text response to a user message and streams it down, token by token. To learn how to use the Chat API with streaming follow our [Text Generation guides](https://docs.cohere.com/v2/docs/chat-api).
Follow the [Migration Guide](https://docs.cohere.com/v2/docs/migrating-v1-to-v2) for instructions on moving from API v1 to API v2.
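
For the non-streaming `chat` method touched in the same file, a comparable usage sketch, again assuming the v2 client and the response shape described in the public docs rather than anything in this diff:

```python
import cohere

co = cohere.ClientV2(api_key="YOUR_API_KEY")  # placeholder key

response = co.chat(
    model="command-r-plus",  # assumed model name
    messages=[{"role": "user", "content": "Summarize what an SDK regeneration commit does."}],
)

# The assistant message content is a list of content blocks; text blocks carry .text.
print(response.message.content[0].text)
```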
