From 0ee1044fdedff115dd7167808109ebfa1a1bae9a Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Wed, 18 Sep 2024 16:13:17 +0000
Subject: [PATCH] SDK regeneration

---
 client/client.go       | 11 +++++++++--
 core/request_option.go |  2 +-
 finetuning/types.go    |  6 +++---
 types.go               | 12 ++++++++++++
 4 files changed, 25 insertions(+), 6 deletions(-)

diff --git a/client/client.go b/client/client.go
index 9f7ef92..8ddf8e1 100644
--- a/client/client.go
+++ b/client/client.go
@@ -7,6 +7,7 @@ import (
 	context "context"
 	json "encoding/json"
 	errors "errors"
+	fmt "fmt"
 	v2 "github.com/cohere-ai/cohere-go/v2"
 	connectors "github.com/cohere-ai/cohere-go/v2/connectors"
 	core "github.com/cohere-ai/cohere-go/v2/core"
@@ -55,7 +56,7 @@ func NewClient(opts ...option.RequestOption) *Client {
 }
 
 // Generates a text response to a user message.
-// To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
+// To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
 func (c *Client) ChatStream(
 	ctx context.Context,
 	request *v2.ChatStreamRequest,
@@ -73,6 +74,9 @@ func (c *Client) ChatStream(
 	endpointURL := baseURL + "/v1/chat"
 
 	headers := core.MergeHeaders(c.header.Clone(), options.ToHeader())
+	if request.Accepts != nil {
+		headers.Add("Accepts", fmt.Sprintf("%v", request.Accepts))
+	}
 
 	errorDecoder := func(statusCode int, body io.Reader) error {
 		raw, err := io.ReadAll(body)
@@ -179,7 +183,7 @@ func (c *Client) ChatStream(
 }
 
 // Generates a text response to a user message.
-// To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
+// To learn how to use the Chat API and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api).
 func (c *Client) Chat(
 	ctx context.Context,
 	request *v2.ChatRequest,
@@ -197,6 +201,9 @@ func (c *Client) Chat(
 	endpointURL := baseURL + "/v1/chat"
 
 	headers := core.MergeHeaders(c.header.Clone(), options.ToHeader())
+	if request.Accepts != nil {
+		headers.Add("Accepts", fmt.Sprintf("%v", request.Accepts))
+	}
 
 	errorDecoder := func(statusCode int, body io.Reader) error {
 		raw, err := io.ReadAll(body)
diff --git a/core/request_option.go b/core/request_option.go
index f616b59..86d8c80 100644
--- a/core/request_option.go
+++ b/core/request_option.go
@@ -56,7 +56,7 @@ func (r *RequestOptions) cloneHeader() http.Header {
 	headers := r.HTTPHeader.Clone()
 	headers.Set("X-Fern-Language", "Go")
 	headers.Set("X-Fern-SDK-Name", "github.com/cohere-ai/cohere-go/v2")
-	headers.Set("X-Fern-SDK-Version", "v2.11.0")
+	headers.Set("X-Fern-SDK-Version", "v2.11.1")
 	return headers
 }
 
diff --git a/finetuning/types.go b/finetuning/types.go
index f01ac32..ffed78a 100644
--- a/finetuning/types.go
+++ b/finetuning/types.go
@@ -61,7 +61,7 @@ func (b *BaseModel) String() string {
 // The possible types of fine-tuned models.
 //
 // - BASE_TYPE_UNSPECIFIED: Unspecified model.
-// - BASE_TYPE_GENERATIVE: Generative model.
+// - BASE_TYPE_GENERATIVE: Deprecated: Generative model.
 // - BASE_TYPE_CLASSIFICATION: Classification model.
 // - BASE_TYPE_RERANK: Rerank model.
 // - BASE_TYPE_CHAT: Chat model.
@@ -546,7 +546,7 @@ type Settings struct {
 	Hyperparameters *Hyperparameters `json:"hyperparameters,omitempty" url:"hyperparameters,omitempty"`
 	// read-only. Whether the model is single-label or multi-label (only for classification).
 	MultiLabel *bool `json:"multi_label,omitempty" url:"multi_label,omitempty"`
-	// The Weights & Biases configuration.
+	// The Weights & Biases configuration (Chat fine-tuning only).
 	Wandb *WandbConfig `json:"wandb,omitempty" url:"wandb,omitempty"`
 
 	extraProperties map[string]interface{}
@@ -644,7 +644,7 @@ func (s Status) Ptr() *Status {
 // The possible strategy used to serve a fine-tuned models.
 //
 // - STRATEGY_UNSPECIFIED: Unspecified strategy.
-// - STRATEGY_VANILLA: Serve the fine-tuned model on a dedicated GPU.
+// - STRATEGY_VANILLA: Deprecated: Serve the fine-tuned model on a dedicated GPU.
 // - STRATEGY_TFEW: Serve the fine-tuned model on a shared GPU.
 type Strategy string
 
diff --git a/types.go b/types.go
index 1e21b06..e8cbc0e 100644
--- a/types.go
+++ b/types.go
@@ -10,6 +10,8 @@ import (
 )
 
 type ChatRequest struct {
+	// Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.
+	Accepts *string `json:"-" url:"-"`
 	// Text input for the model to respond to.
 	//
 	// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
@@ -237,6 +239,8 @@ func (c *ChatRequest) MarshalJSON() ([]byte, error) {
 }
 
 type ChatStreamRequest struct {
+	// Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.
+	Accepts *string `json:"-" url:"-"`
 	// Text input for the model to respond to.
 	//
 	// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
@@ -491,6 +495,10 @@ type DetokenizeRequest struct {
 type EmbedRequest struct {
 	// An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.
 	Texts []string `json:"texts,omitempty" url:"-"`
+	// An array of image data URIs for the model to embed. Maximum number of images per call is `1`.
+	//
+	// The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg` or `image/png` format and has a maximum size of 5MB.
+	Images []string `json:"images,omitempty" url:"-"`
 	// Defaults to embed-english-v2.0
 	//
 	// The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
@@ -3174,6 +3182,7 @@ func (e *EmbedFloatsResponse) String() string {
 // - `"search_query"`: Used for embeddings of search queries run against a vector DB to find relevant documents.
 // - `"classification"`: Used for embeddings passed through a text classifier.
 // - `"clustering"`: Used for the embeddings run through a clustering algorithm.
+// - `"image"`: Used for embeddings with image input.
 type EmbedInputType string
 
 const (
@@ -3181,6 +3190,7 @@ const (
 	EmbedInputTypeSearchQuery EmbedInputType = "search_query"
 	EmbedInputTypeClassification EmbedInputType = "classification"
 	EmbedInputTypeClustering EmbedInputType = "clustering"
+	EmbedInputTypeImage EmbedInputType = "image"
 )
 
 func NewEmbedInputTypeFromString(s string) (EmbedInputType, error) {
@@ -3193,6 +3203,8 @@ func NewEmbedInputTypeFromString(s string) (EmbedInputType, error) {
 		return EmbedInputTypeClassification, nil
 	case "clustering":
 		return EmbedInputTypeClustering, nil
+	case "image":
+		return EmbedInputTypeImage, nil
 	}
 	var t EmbedInputType
 	return "", fmt.Errorf("%s is not a valid %T", s, t)
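
Note (not part of the generated patch): the sketch below illustrates how the new Accepts field on ChatStreamRequest might be used once this change ships. It is an untested example written under assumptions: the usual cohere-go entry points (cohereclient.NewClient, cohereclient.WithToken), a Recv/Close stream that terminates with io.EOF, and a placeholder API key and prompt.

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log"

	cohere "github.com/cohere-ai/cohere-go/v2"
	cohereclient "github.com/cohere-ai/cohere-go/v2/client"
)

func main() {
	// Placeholder token; replace with a real API key.
	co := cohereclient.NewClient(cohereclient.WithToken("YOUR_API_KEY"))

	// The new Accepts field is forwarded as an "Accepts" request header by the
	// headers.Add calls this patch adds to ChatStream and Chat.
	accepts := "text/event-stream"

	stream, err := co.ChatStream(context.Background(), &cohere.ChatStreamRequest{
		Message: "Tell me a short story.",
		Accepts: &accepts,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close()

	for {
		event, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			break // end of stream
		}
		if err != nil {
			log.Fatal(err)
		}
		// Each event is a StreamedChatResponse union; inspect whichever member
		// is populated for the event type you care about.
		fmt.Printf("%+v\n", event)
	}
}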
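
Note (not part of the generated patch): a second illustrative sketch for the new Images field on EmbedRequest and the new EmbedInputTypeImage value. The model name, the InputType field, the Embed call signature, and the truncated data URI are assumptions or placeholders; check the Embed documentation for which models accept image input.

package main

import (
	"context"
	"fmt"
	"log"

	cohere "github.com/cohere-ai/cohere-go/v2"
	cohereclient "github.com/cohere-ai/cohere-go/v2/client"
)

func main() {
	co := cohereclient.NewClient(cohereclient.WithToken("YOUR_API_KEY"))

	// One image per call, as an image/jpeg or image/png data URI of at most 5MB
	// (per the Images field documentation in this patch). The URI below is a
	// truncated placeholder, not real image data.
	dataURI := "data:image/png;base64,iVBORw0KGgoAAAANSUhEUg..."
	model := "embed-english-v3.0" // assumed example model; pick one that supports image input
	inputType := cohere.EmbedInputTypeImage

	resp, err := co.Embed(context.Background(), &cohere.EmbedRequest{
		Images:    []string{dataURI},
		Model:     &model,
		InputType: &inputType,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", resp)
}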