Skip to content

Commit

Permalink
feat: Updated OpenAPI spec
Browse files Browse the repository at this point in the history
  • Loading branch information
github-actions[bot] committed Sep 22, 2024
1 parent 4d0c842 commit f8ed89f
Show file tree
Hide file tree
Showing 54 changed files with 449 additions and 408 deletions.
35 changes: 21 additions & 14 deletions src/libs/Cohere/Generated/Cohere.CohereApi.Chat.g.cs
Original file line number Diff line number Diff line change
Expand Up @@ -199,24 +199,28 @@ partial void ProcessChatResponseContent(
/// </param>
/// <param name="maxTokens">
/// The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments<br/>
/// Included only in requests
/// </param>
/// <param name="maxInputTokens">
/// The maximum number of input tokens to send to the model. If not specified, `max_input_tokens` is the model's context length limit minus a small buffer.<br/>
/// Input will be truncated according to the `prompt_truncation` parameter.<br/>
/// Compatible Deployments: Cohere Platform
/// Compatible Deployments: Cohere Platform<br/>
/// Included only in requests
/// </param>
/// <param name="k">
/// Ensures only the top `k` most likely tokens are considered for generation at each step.<br/>
/// Defaults to `0`, min value of `0`, max value of `500`.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments<br/>
/// Default Value: 0
/// Default Value: 0<br/>
/// Included only in requests
/// </param>
/// <param name="p">
/// Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.<br/>
/// Defaults to `0.75`, min value of `0.01`, max value of `0.99`.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments<br/>
/// Default Value: 0.75
/// Default Value: 0.75<br/>
/// Included only in requests
/// </param>
/// <param name="seed">
/// If specified, the backend will make a best effort to sample tokens<br/>
Expand All @@ -227,17 +231,20 @@ partial void ProcessChatResponseContent(
/// </param>
/// <param name="stopSequences">
/// A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments<br/>
/// Included only in requests
/// </param>
/// <param name="frequencyPenalty">
/// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.<br/>
/// Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments<br/>
/// Included only in requests
/// </param>
/// <param name="presencePenalty">
/// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.<br/>
/// Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments<br/>
/// Included only in requests
/// </param>
/// <param name="tools">
/// A list of available tools (functions) that the model may suggest invoking before producing a text response.<br/>
Expand Down Expand Up @@ -288,6 +295,13 @@ partial void ProcessChatResponseContent(
/// <exception cref="global::System.InvalidOperationException"></exception>
public async global::System.Threading.Tasks.Task<global::Cohere.OneOf<global::Cohere.NonStreamedChatResponse, global::Cohere.StreamedChatResponse?>> ChatAsync(
string message,
int maxTokens,
int maxInputTokens,
int k,
double p,
global::System.Collections.Generic.IList<string> stopSequences,
double frequencyPenalty,
double presencePenalty,
string? xClientName = default,
global::Cohere.ChatAccepts? accepts = default,
string? model = default,
Expand All @@ -301,14 +315,7 @@ partial void ProcessChatResponseContent(
global::System.Collections.Generic.IList<global::Cohere.ChatDocument>? documents = default,
global::Cohere.ChatRequestCitationQuality? citationQuality = default,
float? temperature = default,
int? maxTokens = default,
int? maxInputTokens = default,
int? k = 0,
double? p = 0.75,
int? seed = default,
global::System.Collections.Generic.IList<string>? stopSequences = default,
double? frequencyPenalty = default,
double? presencePenalty = default,
global::System.Collections.Generic.IList<global::Cohere.Tool>? tools = default,
global::System.Collections.Generic.IList<global::Cohere.ToolResult>? toolResults = default,
bool? forceSingleStep = default,
Expand Down
21 changes: 13 additions & 8 deletions src/libs/Cohere/Generated/Cohere.CohereApi.Classify.g.cs
Original file line number Diff line number Diff line change
Expand Up @@ -115,34 +115,39 @@ partial void ProcessClassifyResponseContent(
/// <param name="inputs">
/// A list of up to 96 texts to be classified. Each one must be a non-empty string.<br/>
/// There is, however, no consistent, universal limit to the length a particular input can be. We perform classification on the first `x` tokens of each input, and `x` varies depending on which underlying model is powering classification. The maximum token length for each model is listed in the "max tokens" column [here](https://docs.cohere.com/docs/models).<br/>
/// Note: by default the `truncate` parameter is set to `END`, so tokens exceeding the limit will be automatically dropped. This behavior can be disabled by setting `truncate` to `NONE`, which will result in validation errors for longer texts.
/// Note: by default the `truncate` parameter is set to `END`, so tokens exceeding the limit will be automatically dropped. This behavior can be disabled by setting `truncate` to `NONE`, which will result in validation errors for longer texts.<br/>
/// Included only in requests
/// </param>
/// <param name="examples">
/// An array of examples to provide context to the model. Each example is a text string and its associated label/class. Each unique label requires at least 2 examples associated with it; the maximum number of examples is 2500, and each example has a maximum length of 512 tokens. The values should be structured as `{text: "...",label: "..."}`.<br/>
/// Note: [Fine-tuned Models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly.
/// Note: [Fine-tuned Models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly.<br/>
/// Included only in requests
/// </param>
/// <param name="model">
/// The identifier of the model. Currently available models are `embed-multilingual-v2.0`, `embed-english-light-v2.0`, and `embed-english-v2.0` (default). Smaller "light" models are faster, while larger models will perform better. [Fine-tuned models](https://docs.cohere.com/docs/fine-tuning) can also be supplied with their full ID.
/// The identifier of the model. Currently available models are `embed-multilingual-v2.0`, `embed-english-light-v2.0`, and `embed-english-v2.0` (default). Smaller "light" models are faster, while larger models will perform better. [Fine-tuned models](https://docs.cohere.com/docs/fine-tuning) can also be supplied with their full ID.<br/>
/// Included only in requests
/// </param>
/// <param name="preset">
/// The ID of a custom playground preset. You can create presets in the [playground](https://dashboard.cohere.com/playground/classify?model=large). If you use a preset, all other parameters become optional, and any included parameters will override the preset's parameters.<br/>
/// Included only in requests<br/>
/// Example: my-preset-a58sbd
/// </param>
/// <param name="truncate">
/// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.<br/>
/// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.<br/>
/// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.<br/>
/// Default Value: END
/// Default Value: END<br/>
/// Included only in requests
/// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
public async global::System.Threading.Tasks.Task<global::Cohere.ClassifyResponse> ClassifyAsync(
global::System.Collections.Generic.IList<string> inputs,
global::System.Collections.Generic.IList<global::Cohere.ClassifyExample> examples,
string model,
string preset,
global::Cohere.ClassifyRequestTruncate truncate,
string? xClientName = default,
global::System.Collections.Generic.IList<global::Cohere.ClassifyExample>? examples = default,
string? model = default,
string? preset = default,
global::Cohere.ClassifyRequestTruncate? truncate = global::Cohere.ClassifyRequestTruncate.END,
global::System.Threading.CancellationToken cancellationToken = default)
{
var request = new global::Cohere.ClassifyRequest
Expand Down
3 changes: 2 additions & 1 deletion src/libs/Cohere/Generated/Cohere.CohereApi.Detokenize.g.cs
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,8 @@ partial void ProcessDetokenizeResponseContent(
/// </summary>
/// <param name="xClientName"></param>
/// <param name="tokens">
/// The list of tokens to be detokenized.
/// The list of tokens to be detokenized.<br/>
/// Included only in requests
/// </param>
/// <param name="model">
/// An optional parameter to provide the model name. This will ensure that the detokenization is done by the tokenizer used by that model.
Expand Down
20 changes: 12 additions & 8 deletions src/libs/Cohere/Generated/Cohere.CohereApi.Embed.g.cs
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,8 @@ partial void ProcessEmbedResponseContent(
/// </summary>
/// <param name="xClientName"></param>
/// <param name="texts">
/// An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.
/// An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.<br/>
/// Included only in requests
/// </param>
/// <param name="model">
/// Defaults to embed-english-v2.0<br/>
Expand All @@ -127,7 +128,8 @@ partial void ProcessEmbedResponseContent(
/// * `embed-multilingual-light-v3.0` 384<br/>
/// * `embed-english-v2.0` 4096<br/>
/// * `embed-english-light-v2.0` 1024<br/>
/// * `embed-multilingual-v2.0` 768
/// * `embed-multilingual-v2.0` 768<br/>
/// Included only in requests
/// </param>
/// <param name="inputType">
/// Specifies the type of input passed to the model. Required for embedding models v3 and higher.<br/>
Expand All @@ -143,23 +145,25 @@ partial void ProcessEmbedResponseContent(
/// * `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models.<br/>
/// * `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models.<br/>
/// * `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models.<br/>
/// * `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.
/// * `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.<br/>
/// Included only in requests
/// </param>
/// <param name="truncate">
/// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.<br/>
/// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.<br/>
/// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.<br/>
/// Default Value: END
/// Default Value: END<br/>
/// Included only in requests
/// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
public async global::System.Threading.Tasks.Task<global::Cohere.OneOf<global::Cohere.EmbedFloatsResponse, global::Cohere.EmbedByTypeResponse>> EmbedAsync(
global::System.Collections.Generic.IList<string> texts,
string model,
global::System.Collections.Generic.IList<global::Cohere.EmbeddingType> embeddingTypes,
global::Cohere.EmbedRequestTruncate truncate,
string? xClientName = default,
global::System.Collections.Generic.IList<string>? texts = default,
string? model = default,
global::Cohere.EmbedInputType? inputType = default,
global::System.Collections.Generic.IList<global::Cohere.EmbeddingType>? embeddingTypes = default,
global::Cohere.EmbedRequestTruncate? truncate = global::Cohere.EmbedRequestTruncate.END,
global::System.Threading.CancellationToken cancellationToken = default)
{
var request = new global::Cohere.EmbedRequest
Expand Down
18 changes: 11 additions & 7 deletions src/libs/Cohere/Generated/Cohere.CohereApi.Embedv2.g.cs
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,8 @@ partial void ProcessEmbedv2ResponseContent(
/// </summary>
/// <param name="xClientName"></param>
/// <param name="texts">
/// An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.
/// An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.<br/>
/// Included only in requests
/// </param>
/// <param name="model">
/// Defaults to embed-english-v2.0<br/>
Expand All @@ -127,7 +128,8 @@ partial void ProcessEmbedv2ResponseContent(
/// * `embed-multilingual-light-v3.0` 384<br/>
/// * `embed-english-v2.0` 4096<br/>
/// * `embed-english-light-v2.0` 1024<br/>
/// * `embed-multilingual-v2.0` 768
/// * `embed-multilingual-v2.0` 768<br/>
/// Included only in requests
/// </param>
/// <param name="inputType">
/// Specifies the type of input passed to the model. Required for embedding models v3 and higher.<br/>
Expand All @@ -143,23 +145,25 @@ partial void ProcessEmbedv2ResponseContent(
/// * `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models.<br/>
/// * `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models.<br/>
/// * `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models.<br/>
/// * `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.
/// * `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.<br/>
/// Included only in requests
/// </param>
/// <param name="truncate">
/// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.<br/>
/// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.<br/>
/// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.<br/>
/// Default Value: END
/// Default Value: END<br/>
/// Included only in requests
/// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
public async global::System.Threading.Tasks.Task<global::Cohere.EmbedByTypeResponse> Embedv2Async(
global::System.Collections.Generic.IList<string> texts,
string model,
global::System.Collections.Generic.IList<global::Cohere.EmbeddingType> embeddingTypes,
global::Cohere.Embedv2RequestTruncate truncate,
string? xClientName = default,
global::System.Collections.Generic.IList<string>? texts = default,
global::Cohere.EmbedInputType? inputType = default,
global::System.Collections.Generic.IList<global::Cohere.EmbeddingType>? embeddingTypes = default,
global::Cohere.Embedv2RequestTruncate? truncate = global::Cohere.Embedv2RequestTruncate.END,
global::System.Threading.CancellationToken cancellationToken = default)
{
var request = new global::Cohere.Embedv2Request
Expand Down
Loading

0 comments on commit f8ed89f

Please sign in to comment.