feat: Updated OpenAPI spec
github-actions[bot] committed Aug 30, 2024
1 parent c094559 commit 0455383
Showing 10 changed files with 608 additions and 401 deletions.
17 changes: 14 additions & 3 deletions src/libs/Cohere/Generated/Cohere.CohereApi.Chat.g.cs
@@ -113,7 +113,7 @@ partial void ProcessChatResponseContent(
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
/// </param>
/// <param name="model">
/// Defaults to `command-r-plus`.<br/>
/// Defaults to `command-r-plus-08-2024`.<br/>
/// The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.<br/>
/// Compatible Deployments: Cohere Platform, Private Deployments
/// </param>
@@ -145,7 +145,9 @@ partial void ProcessChatResponseContent(
/// With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.<br/>
/// With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.<br/>
/// With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.<br/>
/// Compatible Deployments: Cohere Platform Only AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments
/// Compatible Deployments: <br/>
/// - AUTO: Cohere Platform Only<br/>
/// - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments
/// </param>
/// <param name="connectors">
/// Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/docs/creating-and-deploying-a-connector) one.<br/>
@@ -258,12 +260,19 @@ partial void ProcessChatResponseContent(
/// Forces the chat to be single step. Defaults to `false`.
/// </param>
/// <param name="responseFormat">
/// Configuration for forcing the model output to adhere to the specified format. Supported on [Command R](https://docs.cohere.com/docs/command-r), [Command R+](https://docs.cohere.com/docs/command-r-plus) and newer models.<br/>
/// Configuration for forcing the model output to adhere to the specified format. Supported on [Command R 03-2024](https://docs.cohere.com/docs/command-r), [Command R+ 04-2024](https://docs.cohere.com/docs/command-r-plus) and newer models.<br/>
/// The model can be forced into outputting JSON objects (with up to 5 levels of nesting) by setting `{ "type": "json_object" }`.<br/>
/// A [JSON Schema](https://json-schema.org/) can optionally be provided to ensure a specific structure.<br/>
/// **Note**: When using `{ "type": "json_object" }`, your `message` should always explicitly instruct the model to generate a JSON (e.g. _"Generate a JSON ..."_). Otherwise the model may get stuck generating an infinite stream of characters and eventually run out of context length.<br/>
/// **Limitation**: The parameter is not supported in RAG mode (when any of `connectors`, `documents`, `tools`, `tool_results` are provided).
/// </param>
/// <param name="safetyMode">
/// Used to select the [safety instruction](/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.<br/>
/// When `NONE` is specified, the safety instruction will be omitted.<br/>
/// Safety modes are not yet configurable in combination with the `tools`, `tool_results` and `documents` parameters.<br/>
/// **Note**: This parameter is only compatible with models [Command R 08-2024](/docs/command-r#august-2024-release), [Command R+ 08-2024](/docs/command-r-plus#august-2024-release) and newer.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
/// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
public async global::System.Threading.Tasks.Task<global::System.OneOf<global::Cohere.NonStreamedChatResponse, global::Cohere.StreamedChatResponse?>> ChatAsync(
@@ -292,6 +301,7 @@ partial void ProcessChatResponseContent(
global::System.Collections.Generic.IList<global::Cohere.ToolResult>? toolResults = default,
bool forceSingleStep = default,
global::Cohere.ResponseFormat? responseFormat = default,
global::Cohere.ChatRequestSafetyMode? safetyMode = default,
global::System.Threading.CancellationToken cancellationToken = default)
{
var request = new global::Cohere.ChatRequest
@@ -320,6 +330,7 @@ partial void ProcessChatResponseContent(
ToolResults = toolResults,
ForceSingleStep = forceSingleStep,
ResponseFormat = responseFormat,
SafetyMode = safetyMode,
};

return await ChatAsync(
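
A minimal call-site sketch of the updated method follows. `CreateConfiguredClient` is a hypothetical stand-in for however the `CohereApi` client is constructed and authenticated; only the `message`, `model`, and `safetyMode` parameters are taken from the signature above.

    using Cohere;

    // `CreateConfiguredClient` is a hypothetical helper standing in for client construction/auth.
    CohereApi api = CreateConfiguredClient();

    // `safetyMode` is the parameter added in this commit; `command-r-plus-08-2024` is the new default model.
    var response = await api.ChatAsync(
        message: "Summarize this support ticket in one sentence.",
        model: "command-r-plus-08-2024",
        safetyMode: ChatRequestSafetyMode.STRICT);
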
19 changes: 16 additions & 3 deletions src/libs/Cohere/Generated/Cohere.Models.ChatRequest.g.cs
@@ -17,7 +17,7 @@ public sealed partial class ChatRequest
public required string Message { get; set; }

/// <summary>
/// Defaults to `command-r-plus`.<br/>
/// Defaults to `command-r-plus-08-2024`.<br/>
/// The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.<br/>
/// Compatible Deployments: Cohere Platform, Private Deployments
/// </summary>
@@ -64,7 +64,9 @@ public sealed partial class ChatRequest
/// With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.<br/>
/// With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.<br/>
/// With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.<br/>
/// Compatible Deployments: Cohere Platform Only AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments
/// Compatible Deployments: <br/>
/// - AUTO: Cohere Platform Only<br/>
/// - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments
/// </summary>
[global::System.Text.Json.Serialization.JsonPropertyName("prompt_truncation")]
[global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenApiGenerator.JsonConverters.ChatRequestPromptTruncationJsonConverter))]
@@ -230,7 +232,7 @@ public sealed partial class ChatRequest
public bool ForceSingleStep { get; set; }

/// <summary>
/// Configuration for forcing the model output to adhere to the specified format. Supported on [Command R](https://docs.cohere.com/docs/command-r), [Command R+](https://docs.cohere.com/docs/command-r-plus) and newer models.<br/>
/// Configuration for forcing the model output to adhere to the specified format. Supported on [Command R 03-2024](https://docs.cohere.com/docs/command-r), [Command R+ 04-2024](https://docs.cohere.com/docs/command-r-plus) and newer models.<br/>
/// The model can be forced into outputting JSON objects (with up to 5 levels of nesting) by setting `{ "type": "json_object" }`.<br/>
/// A [JSON Schema](https://json-schema.org/) can optionally be provided to ensure a specific structure.<br/>
/// **Note**: When using `{ "type": "json_object" }`, your `message` should always explicitly instruct the model to generate a JSON (e.g. _"Generate a JSON ..."_). Otherwise the model may get stuck generating an infinite stream of characters and eventually run out of context length.<br/>
@@ -240,6 +242,17 @@
[global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenApiGenerator.JsonConverters.ResponseFormatJsonConverter))]
public global::Cohere.ResponseFormat? ResponseFormat { get; set; }

/// <summary>
/// Used to select the [safety instruction](/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.<br/>
/// When `NONE` is specified, the safety instruction will be omitted.<br/>
/// Safety modes are not yet configurable in combination with the `tools`, `tool_results` and `documents` parameters.<br/>
/// **Note**: This parameter is only compatible with models [Command R 08-2024](/docs/command-r#august-2024-release), [Command R+ 08-2024](/docs/command-r-plus#august-2024-release) and newer.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
/// </summary>
[global::System.Text.Json.Serialization.JsonPropertyName("safety_mode")]
[global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenApiGenerator.JsonConverters.ChatRequestSafetyModeJsonConverter))]
public global::Cohere.ChatRequestSafetyMode? SafetyMode { get; set; }

/// <summary>
/// Additional properties that are not explicitly defined in the schema
/// </summary>
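
A short sketch of populating the request model directly, assuming only the required `Message` and the new `SafetyMode` property are set and every other property keeps its default:

    var request = new Cohere.ChatRequest
    {
        Message = "Draft a polite follow-up email.",            // required member
        SafetyMode = Cohere.ChatRequestSafetyMode.CONTEXTUAL,   // property added in this commit
    };
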
@@ -9,7 +9,9 @@ namespace Cohere
/// With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.<br/>
/// With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.<br/>
/// With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.<br/>
/// Compatible Deployments: Cohere Platform Only AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments
/// Compatible Deployments: <br/>
/// - AUTO: Cohere Platform Only<br/>
/// - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments
/// </summary>
public enum ChatRequestPromptTruncation
{
61 changes: 61 additions & 0 deletions src/libs/Cohere/Generated/Cohere.Models.ChatRequestSafetyMode.g.cs
@@ -0,0 +1,61 @@

#nullable enable

namespace Cohere
{
/// <summary>
/// Used to select the [safety instruction](/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.<br/>
/// When `NONE` is specified, the safety instruction will be omitted.<br/>
/// Safety modes are not yet configurable in combination with the `tools`, `tool_results` and `documents` parameters.<br/>
/// **Note**: This parameter is only compatible with models [Command R 08-2024](/docs/command-r#august-2024-release), [Command R+ 08-2024](/docs/command-r-plus#august-2024-release) and newer.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
/// </summary>
public enum ChatRequestSafetyMode
{
/// <summary>
///
/// </summary>
CONTEXTUAL,
/// <summary>
///
/// </summary>
STRICT,
/// <summary>
///
/// </summary>
NONE,
}

/// <summary>
/// Enum extensions to do fast conversions without reflection.
/// </summary>
public static class ChatRequestSafetyModeExtensions
{
/// <summary>
/// Converts an enum to a string.
/// </summary>
public static string ToValueString(this ChatRequestSafetyMode value)
{
return value switch
{
ChatRequestSafetyMode.CONTEXTUAL => "CONTEXTUAL",
ChatRequestSafetyMode.STRICT => "STRICT",
ChatRequestSafetyMode.NONE => "NONE",
_ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null),
};
}
/// <summary>
/// Converts a string to an enum.
/// </summary>
public static ChatRequestSafetyMode? ToEnum(string value)
{
return value switch
{
"CONTEXTUAL" => ChatRequestSafetyMode.CONTEXTUAL,
"STRICT" => ChatRequestSafetyMode.STRICT,
"NONE" => ChatRequestSafetyMode.NONE,
_ => null,
};
}
}
}
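
The extension helpers above can be exercised directly; a quick sketch of the conversions they implement:

    using Cohere;

    var name = ChatRequestSafetyMode.STRICT.ToValueString();          // "STRICT"
    var parsed = ChatRequestSafetyModeExtensions.ToEnum("NONE");      // ChatRequestSafetyMode.NONE
    var unknown = ChatRequestSafetyModeExtensions.ToEnum("OTHER");    // null - unrecognized strings return null rather than throwing
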
@@ -6,7 +6,7 @@
namespace Cohere
{
/// <summary>
/// Configuration for forcing the model output to adhere to the specified format. Supported on [Command R](https://docs.cohere.com/docs/command-r), [Command R+](https://docs.cohere.com/docs/command-r-plus) and newer models.<br/>
/// Configuration for forcing the model output to adhere to the specified format. Supported on [Command R 03-2024](https://docs.cohere.com/docs/command-r), [Command R+ 04-2024](https://docs.cohere.com/docs/command-r-plus) and newer models.<br/>
/// The model can be forced into outputting JSON objects (with up to 5 levels of nesting) by setting `{ "type": "json_object" }`.<br/>
/// A [JSON Schema](https://json-schema.org/) can optionally be provided to ensure a specific structure.<br/>
/// **Note**: When using `{ "type": "json_object" }`, your `message` should always explicitly instruct the model to generate a JSON (e.g. _"Generate a JSON ..."_). Otherwise the model may get stuck generating an infinite stream of characters and eventually run out of context length.<br/>
@@ -0,0 +1,49 @@
#nullable enable

namespace OpenApiGenerator.JsonConverters
{
/// <inheritdoc />
public sealed class ChatRequestSafetyModeJsonConverter : global::System.Text.Json.Serialization.JsonConverter<global::Cohere.ChatRequestSafetyMode>
{
/// <inheritdoc />
public override global::Cohere.ChatRequestSafetyMode Read(
ref global::System.Text.Json.Utf8JsonReader reader,
global::System.Type typeToConvert,
global::System.Text.Json.JsonSerializerOptions options)
{
switch (reader.TokenType)
{
case global::System.Text.Json.JsonTokenType.String:
{
var stringValue = reader.GetString();
if (stringValue != null)
{
return global::Cohere.ChatRequestSafetyModeExtensions.ToEnum(stringValue) ?? default;
}

break;
}
case global::System.Text.Json.JsonTokenType.Number:
{
var numValue = reader.GetInt32();
return (global::Cohere.ChatRequestSafetyMode)numValue;
}
default:
throw new global::System.ArgumentOutOfRangeException(nameof(reader));
}

return default;
}

/// <inheritdoc />
public override void Write(
global::System.Text.Json.Utf8JsonWriter writer,
global::Cohere.ChatRequestSafetyMode value,
global::System.Text.Json.JsonSerializerOptions options)
{
writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer));

writer.WriteStringValue(global::Cohere.ChatRequestSafetyModeExtensions.ToValueString(value));
}
}
}
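
A sketch of what the converter does when registered with System.Text.Json by hand; the generated client wires it up through the source-generated serializer context updated later in this commit, so the manual registration below is only illustrative:

    var options = new System.Text.Json.JsonSerializerOptions();
    options.Converters.Add(new OpenApiGenerator.JsonConverters.ChatRequestSafetyModeJsonConverter());

    // Writing produces the enum's string name...
    string json = System.Text.Json.JsonSerializer.Serialize(Cohere.ChatRequestSafetyMode.NONE, options);   // "\"NONE\""

    // ...and reading accepts both string and numeric tokens.
    var parsed = System.Text.Json.JsonSerializer.Deserialize<Cohere.ChatRequestSafetyMode>("\"STRICT\"", options);
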
@@ -0,0 +1,56 @@
#nullable enable

namespace OpenApiGenerator.JsonConverters
{
/// <inheritdoc />
public sealed class ChatRequestSafetyModeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter<global::Cohere.ChatRequestSafetyMode?>
{
/// <inheritdoc />
public override global::Cohere.ChatRequestSafetyMode? Read(
ref global::System.Text.Json.Utf8JsonReader reader,
global::System.Type typeToConvert,
global::System.Text.Json.JsonSerializerOptions options)
{
switch (reader.TokenType)
{
case global::System.Text.Json.JsonTokenType.String:
{
var stringValue = reader.GetString();
if (stringValue != null)
{
return global::Cohere.ChatRequestSafetyModeExtensions.ToEnum(stringValue);
}

break;
}
case global::System.Text.Json.JsonTokenType.Number:
{
var numValue = reader.GetInt32();
return (global::Cohere.ChatRequestSafetyMode)numValue;
}
default:
throw new global::System.ArgumentOutOfRangeException(nameof(reader));
}

return default;
}

/// <inheritdoc />
public override void Write(
global::System.Text.Json.Utf8JsonWriter writer,
global::Cohere.ChatRequestSafetyMode? value,
global::System.Text.Json.JsonSerializerOptions options)
{
writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer));

if (value == null)
{
writer.WriteNullValue();
}
else
{
writer.WriteStringValue(global::Cohere.ChatRequestSafetyModeExtensions.ToValueString(value.Value));
}
}
}
}
2 changes: 2 additions & 0 deletions src/libs/Cohere/Generated/JsonSerializerContext.g.cs
@@ -91,6 +91,8 @@ namespace Cohere
typeof(global::OpenApiGenerator.JsonConverters.ChatRequestPromptTruncationNullableJsonConverter),
typeof(global::OpenApiGenerator.JsonConverters.ChatRequestCitationQualityJsonConverter),
typeof(global::OpenApiGenerator.JsonConverters.ChatRequestCitationQualityNullableJsonConverter),
typeof(global::OpenApiGenerator.JsonConverters.ChatRequestSafetyModeJsonConverter),
typeof(global::OpenApiGenerator.JsonConverters.ChatRequestSafetyModeNullableJsonConverter),
typeof(global::OpenApiGenerator.JsonConverters.Chatv2RequestCitationModeJsonConverter),
typeof(global::OpenApiGenerator.JsonConverters.Chatv2RequestCitationModeNullableJsonConverter),
typeof(global::OpenApiGenerator.JsonConverters.GenerateRequestTruncateJsonConverter),