diff --git a/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestDeveloperMessageRole.g.cs b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestDeveloperMessageRole.g.cs new file mode 100644 index 00000000..968307a7 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestDeveloperMessageRole.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class ChatCompletionRequestDeveloperMessageRoleJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.ChatCompletionRequestDeveloperMessageRole Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.ChatCompletionRequestDeveloperMessageRoleExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.ChatCompletionRequestDeveloperMessageRole)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.ChatCompletionRequestDeveloperMessageRole value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.ChatCompletionRequestDeveloperMessageRoleExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestDeveloperMessageRoleNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestDeveloperMessageRoleNullable.g.cs new file mode 100644 index 00000000..32bad652 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestDeveloperMessageRoleNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class ChatCompletionRequestDeveloperMessageRoleNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.ChatCompletionRequestDeveloperMessageRole? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.ChatCompletionRequestDeveloperMessageRoleExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.ChatCompletionRequestDeveloperMessageRole)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.ChatCompletionRequestDeveloperMessageRole? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.ChatCompletionRequestDeveloperMessageRoleExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessage.g.cs b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessage.g.cs index 5f7ebc2d..13b691a0 100644 --- a/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessage.g.cs +++ b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessage.g.cs @@ -21,6 +21,13 @@ public class ChatCompletionRequestMessageJsonConverter : global::System.Text.Jso throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::OpenAI.ChatCompletionRequestMessageDiscriminator)}"); var discriminator = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, discriminatorTypeInfo); + global::OpenAI.ChatCompletionRequestDeveloperMessage? developer = default; + if (discriminator?.Role == global::OpenAI.ChatCompletionRequestMessageDiscriminatorRole.Developer) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::OpenAI.ChatCompletionRequestDeveloperMessage), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::OpenAI.ChatCompletionRequestDeveloperMessage)}"); + developer = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } global::OpenAI.ChatCompletionRequestSystemMessage? 
system = default; if (discriminator?.Role == global::OpenAI.ChatCompletionRequestMessageDiscriminatorRole.System) { @@ -59,6 +66,7 @@ public class ChatCompletionRequestMessageJsonConverter : global::System.Text.Jso var result = new global::OpenAI.ChatCompletionRequestMessage( discriminator?.Role, + developer, system, user, assistant, @@ -78,7 +86,13 @@ public override void Write( options = options ?? throw new global::System.ArgumentNullException(nameof(options)); var typeInfoResolver = options.TypeInfoResolver ?? throw new global::System.InvalidOperationException("TypeInfoResolver is not set."); - if (value.IsSystem) + if (value.IsDeveloper) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::OpenAI.ChatCompletionRequestDeveloperMessage), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::OpenAI.ChatCompletionRequestDeveloperMessage).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.Developer, typeInfo); + } + else if (value.IsSystem) { var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::OpenAI.ChatCompletionRequestSystemMessage), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::OpenAI.ChatCompletionRequestSystemMessage).Name}"); diff --git a/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestReasoningEffort.g.cs b/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestReasoningEffort.g.cs new file mode 100644 index 00000000..1a94ac4d --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestReasoningEffort.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class CreateChatCompletionRequestReasoningEffortJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.CreateChatCompletionRequestReasoningEffort Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.CreateChatCompletionRequestReasoningEffortExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.CreateChatCompletionRequestReasoningEffort)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.CreateChatCompletionRequestReasoningEffort value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.CreateChatCompletionRequestReasoningEffortExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestReasoningEffortNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestReasoningEffortNullable.g.cs new file mode 100644 index 00000000..b634b983 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestReasoningEffortNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class CreateChatCompletionRequestReasoningEffortNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.CreateChatCompletionRequestReasoningEffort? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.CreateChatCompletionRequestReasoningEffortExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.CreateChatCompletionRequestReasoningEffort)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.CreateChatCompletionRequestReasoningEffort? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.CreateChatCompletionRequestReasoningEffortExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersBatchSize.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersBatchSize.g.cs new file mode 100644 index 00000000..d286da97 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersBatchSize.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneDPOMethodHyperparametersBatchSizeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneDPOMethodHyperparametersBatchSize Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneDPOMethodHyperparametersBatchSizeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneDPOMethodHyperparametersBatchSize)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneDPOMethodHyperparametersBatchSize value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.FineTuneDPOMethodHyperparametersBatchSizeExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersBatchSizeNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersBatchSizeNullable.g.cs new file mode 100644 index 00000000..ab3cfea2 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersBatchSizeNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneDPOMethodHyperparametersBatchSizeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneDPOMethodHyperparametersBatchSize? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneDPOMethodHyperparametersBatchSizeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneDPOMethodHyperparametersBatchSize)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneDPOMethodHyperparametersBatchSize? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.FineTuneDPOMethodHyperparametersBatchSizeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersBeta.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersBeta.g.cs new file mode 100644 index 00000000..4c48f170 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersBeta.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneDPOMethodHyperparametersBetaJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneDPOMethodHyperparametersBeta Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneDPOMethodHyperparametersBetaExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneDPOMethodHyperparametersBeta)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneDPOMethodHyperparametersBeta value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.FineTuneDPOMethodHyperparametersBetaExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersBetaNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersBetaNullable.g.cs new file mode 100644 index 00000000..4babcdee --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersBetaNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneDPOMethodHyperparametersBetaNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneDPOMethodHyperparametersBeta? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneDPOMethodHyperparametersBetaExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneDPOMethodHyperparametersBeta)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneDPOMethodHyperparametersBeta? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.FineTuneDPOMethodHyperparametersBetaExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersLearningRateMultiplier.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersLearningRateMultiplier.g.cs new file mode 100644 index 00000000..e544789d --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersLearningRateMultiplier.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneDPOMethodHyperparametersLearningRateMultiplierJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneDPOMethodHyperparametersLearningRateMultiplier Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneDPOMethodHyperparametersLearningRateMultiplierExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneDPOMethodHyperparametersLearningRateMultiplier)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneDPOMethodHyperparametersLearningRateMultiplier value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.FineTuneDPOMethodHyperparametersLearningRateMultiplierExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersLearningRateMultiplierNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersLearningRateMultiplierNullable.g.cs new file mode 100644 index 00000000..d26d12d2 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersLearningRateMultiplierNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneDPOMethodHyperparametersLearningRateMultiplierNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneDPOMethodHyperparametersLearningRateMultiplier? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneDPOMethodHyperparametersLearningRateMultiplierExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneDPOMethodHyperparametersLearningRateMultiplier)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneDPOMethodHyperparametersLearningRateMultiplier? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.FineTuneDPOMethodHyperparametersLearningRateMultiplierExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersNEpochs.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersNEpochs.g.cs new file mode 100644 index 00000000..b159246a --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersNEpochs.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneDPOMethodHyperparametersNEpochsJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneDPOMethodHyperparametersNEpochs Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneDPOMethodHyperparametersNEpochsExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneDPOMethodHyperparametersNEpochs)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneDPOMethodHyperparametersNEpochs value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.FineTuneDPOMethodHyperparametersNEpochsExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersNEpochsNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersNEpochsNullable.g.cs new file mode 100644 index 00000000..230ca9e9 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneDPOMethodHyperparametersNEpochsNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneDPOMethodHyperparametersNEpochsNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneDPOMethodHyperparametersNEpochs? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneDPOMethodHyperparametersNEpochsExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneDPOMethodHyperparametersNEpochs)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneDPOMethodHyperparametersNEpochs? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.FineTuneDPOMethodHyperparametersNEpochsExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneMethodType.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneMethodType.g.cs new file mode 100644 index 00000000..6d4398bc --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneMethodType.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneMethodTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneMethodType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneMethodTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneMethodType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneMethodType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.FineTuneMethodTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneMethodTypeNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneMethodTypeNullable.g.cs new file mode 100644 index 00000000..59122c39 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneMethodTypeNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneMethodTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneMethodType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneMethodTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneMethodType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneMethodType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.FineTuneMethodTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersBatchSize.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersBatchSize.g.cs new file mode 100644 index 00000000..8fa9591a --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersBatchSize.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneSupervisedMethodHyperparametersBatchSizeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneSupervisedMethodHyperparametersBatchSize Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneSupervisedMethodHyperparametersBatchSizeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneSupervisedMethodHyperparametersBatchSize)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneSupervisedMethodHyperparametersBatchSize value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.FineTuneSupervisedMethodHyperparametersBatchSizeExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersBatchSizeNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersBatchSizeNullable.g.cs new file mode 100644 index 00000000..93a7c8ad --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersBatchSizeNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneSupervisedMethodHyperparametersBatchSizeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneSupervisedMethodHyperparametersBatchSize? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneSupervisedMethodHyperparametersBatchSizeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneSupervisedMethodHyperparametersBatchSize)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneSupervisedMethodHyperparametersBatchSize? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.FineTuneSupervisedMethodHyperparametersBatchSizeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersLearningRateMultiplier.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersLearningRateMultiplier.g.cs new file mode 100644 index 00000000..1f5e2909 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersLearningRateMultiplier.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneSupervisedMethodHyperparametersLearningRateMultiplierJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneSupervisedMethodHyperparametersLearningRateMultiplier Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneSupervisedMethodHyperparametersLearningRateMultiplierExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneSupervisedMethodHyperparametersLearningRateMultiplier)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneSupervisedMethodHyperparametersLearningRateMultiplier value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.FineTuneSupervisedMethodHyperparametersLearningRateMultiplierExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersLearningRateMultiplierNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersLearningRateMultiplierNullable.g.cs new file mode 100644 index 00000000..20d4ea8d --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersLearningRateMultiplierNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneSupervisedMethodHyperparametersLearningRateMultiplierNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneSupervisedMethodHyperparametersLearningRateMultiplier? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneSupervisedMethodHyperparametersLearningRateMultiplierExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneSupervisedMethodHyperparametersLearningRateMultiplier)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneSupervisedMethodHyperparametersLearningRateMultiplier? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.FineTuneSupervisedMethodHyperparametersLearningRateMultiplierExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersNEpochs.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersNEpochs.g.cs new file mode 100644 index 00000000..04f15067 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersNEpochs.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneSupervisedMethodHyperparametersNEpochsJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneSupervisedMethodHyperparametersNEpochs Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneSupervisedMethodHyperparametersNEpochsExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneSupervisedMethodHyperparametersNEpochs)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneSupervisedMethodHyperparametersNEpochs value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.FineTuneSupervisedMethodHyperparametersNEpochsExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersNEpochsNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersNEpochsNullable.g.cs new file mode 100644 index 00000000..98703b80 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuneSupervisedMethodHyperparametersNEpochsNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuneSupervisedMethodHyperparametersNEpochsNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuneSupervisedMethodHyperparametersNEpochs? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuneSupervisedMethodHyperparametersNEpochsExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuneSupervisedMethodHyperparametersNEpochs)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuneSupervisedMethodHyperparametersNEpochs? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.FineTuneSupervisedMethodHyperparametersNEpochsExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobEventType.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobEventType.g.cs new file mode 100644 index 00000000..5f2c9e2f --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobEventType.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuningJobEventTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuningJobEventType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuningJobEventTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuningJobEventType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuningJobEventType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.FineTuningJobEventTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobEventTypeNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobEventTypeNullable.g.cs new file mode 100644 index 00000000..25df6d99 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobEventTypeNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuningJobEventTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuningJobEventType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuningJobEventTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuningJobEventType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuningJobEventType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.FineTuningJobEventTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobHyperparametersBatchSize.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobHyperparametersBatchSize.g.cs new file mode 100644 index 00000000..47a03ae0 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobHyperparametersBatchSize.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuningJobHyperparametersBatchSizeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuningJobHyperparametersBatchSize Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuningJobHyperparametersBatchSizeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuningJobHyperparametersBatchSize)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuningJobHyperparametersBatchSize value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.FineTuningJobHyperparametersBatchSizeExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobHyperparametersBatchSizeNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobHyperparametersBatchSizeNullable.g.cs new file mode 100644 index 00000000..f1795833 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobHyperparametersBatchSizeNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuningJobHyperparametersBatchSizeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuningJobHyperparametersBatchSize? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuningJobHyperparametersBatchSizeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuningJobHyperparametersBatchSize)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuningJobHyperparametersBatchSize? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.FineTuningJobHyperparametersBatchSizeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobHyperparametersLearningRateMultiplier.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobHyperparametersLearningRateMultiplier.g.cs new file mode 100644 index 00000000..9fbdc88e --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobHyperparametersLearningRateMultiplier.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuningJobHyperparametersLearningRateMultiplierJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuningJobHyperparametersLearningRateMultiplier Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuningJobHyperparametersLearningRateMultiplierExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuningJobHyperparametersLearningRateMultiplier)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuningJobHyperparametersLearningRateMultiplier value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.FineTuningJobHyperparametersLearningRateMultiplierExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobHyperparametersLearningRateMultiplierNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobHyperparametersLearningRateMultiplierNullable.g.cs new file mode 100644 index 00000000..cc2819e3 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.FineTuningJobHyperparametersLearningRateMultiplierNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class FineTuningJobHyperparametersLearningRateMultiplierNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.FineTuningJobHyperparametersLearningRateMultiplier? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.FineTuningJobHyperparametersLearningRateMultiplierExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.FineTuningJobHyperparametersLearningRateMultiplier)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.FineTuningJobHyperparametersLearningRateMultiplier? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.FineTuningJobHyperparametersLearningRateMultiplierExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsConversation.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsConversation.g.cs new file mode 100644 index 00000000..eef73f76 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsConversation.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeResponseCreateParamsConversationJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeResponseCreateParamsConversation Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeResponseCreateParamsConversationExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeResponseCreateParamsConversation)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeResponseCreateParamsConversation value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeResponseCreateParamsConversationExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsConversationNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsConversationNullable.g.cs new file mode 100644 index 00000000..70f304e2 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsConversationNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeResponseCreateParamsConversationNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeResponseCreateParamsConversation? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeResponseCreateParamsConversationExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeResponseCreateParamsConversation)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeResponseCreateParamsConversation? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeResponseCreateParamsConversationExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsMaxResponseOutputTokens.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsMaxResponseOutputTokens.g.cs new file mode 100644 index 00000000..06f78559 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsMaxResponseOutputTokens.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeResponseCreateParamsMaxResponseOutputTokensJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeResponseCreateParamsMaxResponseOutputTokens Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeResponseCreateParamsMaxResponseOutputTokensExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeResponseCreateParamsMaxResponseOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeResponseCreateParamsMaxResponseOutputTokens value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeResponseCreateParamsMaxResponseOutputTokensExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsMaxResponseOutputTokensNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsMaxResponseOutputTokensNullable.g.cs new file mode 100644 index 00000000..0f1d9847 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsMaxResponseOutputTokensNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeResponseCreateParamsMaxResponseOutputTokensNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeResponseCreateParamsMaxResponseOutputTokens? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeResponseCreateParamsMaxResponseOutputTokensExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeResponseCreateParamsMaxResponseOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeResponseCreateParamsMaxResponseOutputTokens? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeResponseCreateParamsMaxResponseOutputTokensExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsModalitie.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsModalitie.g.cs new file mode 100644 index 00000000..7606e87f --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsModalitie.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeResponseCreateParamsModalitieJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeResponseCreateParamsModalitie Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeResponseCreateParamsModalitieExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeResponseCreateParamsModalitie)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeResponseCreateParamsModalitie value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeResponseCreateParamsModalitieExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsModalitieNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsModalitieNullable.g.cs new file mode 100644 index 00000000..37268653 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsModalitieNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeResponseCreateParamsModalitieNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeResponseCreateParamsModalitie? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeResponseCreateParamsModalitieExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeResponseCreateParamsModalitie)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeResponseCreateParamsModalitie? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeResponseCreateParamsModalitieExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsOutputAudioFormat.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsOutputAudioFormat.g.cs new file mode 100644 index 00000000..2557b7ea --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsOutputAudioFormat.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeResponseCreateParamsOutputAudioFormatJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeResponseCreateParamsOutputAudioFormat Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeResponseCreateParamsOutputAudioFormatExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeResponseCreateParamsOutputAudioFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeResponseCreateParamsOutputAudioFormat value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeResponseCreateParamsOutputAudioFormatExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsOutputAudioFormatNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsOutputAudioFormatNullable.g.cs new file mode 100644 index 00000000..8629ebed --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsOutputAudioFormatNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeResponseCreateParamsOutputAudioFormatNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeResponseCreateParamsOutputAudioFormat? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeResponseCreateParamsOutputAudioFormatExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeResponseCreateParamsOutputAudioFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeResponseCreateParamsOutputAudioFormat? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeResponseCreateParamsOutputAudioFormatExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsToolType.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsToolType.g.cs new file mode 100644 index 00000000..0cefc205 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsToolType.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeResponseCreateParamsToolTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeResponseCreateParamsToolType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeResponseCreateParamsToolTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeResponseCreateParamsToolType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeResponseCreateParamsToolType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeResponseCreateParamsToolTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsToolTypeNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsToolTypeNullable.g.cs new file mode 100644 index 00000000..a3381d9d --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsToolTypeNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeResponseCreateParamsToolTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeResponseCreateParamsToolType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeResponseCreateParamsToolTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeResponseCreateParamsToolType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeResponseCreateParamsToolType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeResponseCreateParamsToolTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsVoice.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsVoice.g.cs new file mode 100644 index 00000000..16aee4ff --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsVoice.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeResponseCreateParamsVoiceJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeResponseCreateParamsVoice Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeResponseCreateParamsVoiceExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeResponseCreateParamsVoice)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeResponseCreateParamsVoice value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeResponseCreateParamsVoiceExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsVoiceNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsVoiceNullable.g.cs new file mode 100644 index 00000000..edc65e3d --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeResponseCreateParamsVoiceNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeResponseCreateParamsVoiceNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeResponseCreateParamsVoice? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeResponseCreateParamsVoiceExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeResponseCreateParamsVoice)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeResponseCreateParamsVoice? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeResponseCreateParamsVoiceExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventRateLimitsUpdatedRateLimitName.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventRateLimitsUpdatedRateLimitName.g.cs new file mode 100644 index 00000000..6e95aae4 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventRateLimitsUpdatedRateLimitName.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeServerEventRateLimitsUpdatedRateLimitNameJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimitName Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimitNameExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimitName)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimitName value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimitNameExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventRateLimitsUpdatedRateLimitNameNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventRateLimitsUpdatedRateLimitNameNullable.g.cs new file mode 100644 index 00000000..9fbf192c --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventRateLimitsUpdatedRateLimitNameNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeServerEventRateLimitsUpdatedRateLimitNameNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimitName? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimitNameExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimitName)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimitName? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimitNameExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventResponseContentPartDonePartType.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventResponseContentPartDonePartType.g.cs new file mode 100644 index 00000000..10080bbd --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventResponseContentPartDonePartType.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseContentPartDonePartTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeServerEventResponseContentPartDonePartType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeServerEventResponseContentPartDonePartTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeServerEventResponseContentPartDonePartType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeServerEventResponseContentPartDonePartType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeServerEventResponseContentPartDonePartTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventResponseContentPartDonePartTypeNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventResponseContentPartDonePartTypeNullable.g.cs new file mode 100644 index 00000000..96ea5519 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventResponseContentPartDonePartTypeNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseContentPartDonePartTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeServerEventResponseContentPartDonePartType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeServerEventResponseContentPartDonePartTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeServerEventResponseContentPartDonePartType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeServerEventResponseContentPartDonePartType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeServerEventResponseContentPartDonePartTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestInputAudioFormat.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestInputAudioFormat.g.cs new file mode 100644 index 00000000..b2078b87 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestInputAudioFormat.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateRequestInputAudioFormatJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateRequestInputAudioFormat Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateRequestInputAudioFormatExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateRequestInputAudioFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateRequestInputAudioFormat value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateRequestInputAudioFormatExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestInputAudioFormatNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestInputAudioFormatNullable.g.cs new file mode 100644 index 00000000..be479e52 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestInputAudioFormatNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateRequestInputAudioFormatNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateRequestInputAudioFormat? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateRequestInputAudioFormatExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateRequestInputAudioFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateRequestInputAudioFormat? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateRequestInputAudioFormatExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestMaxResponseOutputTokens.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestMaxResponseOutputTokens.g.cs new file mode 100644 index 00000000..676c6c17 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestMaxResponseOutputTokens.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateRequestMaxResponseOutputTokensJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateRequestMaxResponseOutputTokens Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateRequestMaxResponseOutputTokensExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateRequestMaxResponseOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateRequestMaxResponseOutputTokens value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateRequestMaxResponseOutputTokensExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestMaxResponseOutputTokensNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestMaxResponseOutputTokensNullable.g.cs new file mode 100644 index 00000000..bd0059a8 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestMaxResponseOutputTokensNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateRequestMaxResponseOutputTokensNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateRequestMaxResponseOutputTokens? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateRequestMaxResponseOutputTokensExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateRequestMaxResponseOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateRequestMaxResponseOutputTokens? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateRequestMaxResponseOutputTokensExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestModalitie.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestModalitie.g.cs new file mode 100644 index 00000000..2bfe0fc6 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestModalitie.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateRequestModalitieJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateRequestModalitie Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateRequestModalitieExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateRequestModalitie)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateRequestModalitie value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateRequestModalitieExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestModalitieNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestModalitieNullable.g.cs new file mode 100644 index 00000000..3a8b491e --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestModalitieNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateRequestModalitieNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateRequestModalitie? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateRequestModalitieExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateRequestModalitie)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateRequestModalitie? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateRequestModalitieExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestModel.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestModel.g.cs new file mode 100644 index 00000000..4260254e --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestModel.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateRequestModelJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateRequestModel Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateRequestModelExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateRequestModel)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateRequestModel value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateRequestModelExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestModelNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestModelNullable.g.cs new file mode 100644 index 00000000..9af27594 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestModelNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateRequestModelNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateRequestModel? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateRequestModelExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateRequestModel)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateRequestModel? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateRequestModelExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestOutputAudioFormat.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestOutputAudioFormat.g.cs new file mode 100644 index 00000000..eaad884a --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestOutputAudioFormat.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateRequestOutputAudioFormatJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormat Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormatExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormat value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormatExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestOutputAudioFormatNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestOutputAudioFormatNullable.g.cs new file mode 100644 index 00000000..e4d55e8a --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestOutputAudioFormatNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateRequestOutputAudioFormatNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormat? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormatExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormat? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormatExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestToolType.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestToolType.g.cs new file mode 100644 index 00000000..654b6ce1 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestToolType.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateRequestToolTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateRequestToolType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateRequestToolTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateRequestToolType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateRequestToolType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateRequestToolTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestToolTypeNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestToolTypeNullable.g.cs new file mode 100644 index 00000000..47d30d76 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestToolTypeNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateRequestToolTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateRequestToolType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateRequestToolTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateRequestToolType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateRequestToolType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateRequestToolTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestVoice.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestVoice.g.cs new file mode 100644 index 00000000..0a681445 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestVoice.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateRequestVoiceJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateRequestVoice Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateRequestVoiceExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateRequestVoice)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateRequestVoice value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateRequestVoiceExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestVoiceNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestVoiceNullable.g.cs new file mode 100644 index 00000000..7854f1bd --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateRequestVoiceNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateRequestVoiceNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateRequestVoice? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateRequestVoiceExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateRequestVoice)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateRequestVoice? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateRequestVoiceExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseMaxResponseOutputTokens.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseMaxResponseOutputTokens.g.cs new file mode 100644 index 00000000..699f8f69 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseMaxResponseOutputTokens.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateResponseMaxResponseOutputTokensJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateResponseMaxResponseOutputTokens Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateResponseMaxResponseOutputTokensExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateResponseMaxResponseOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateResponseMaxResponseOutputTokens value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateResponseMaxResponseOutputTokensExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseMaxResponseOutputTokensNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseMaxResponseOutputTokensNullable.g.cs new file mode 100644 index 00000000..9076a228 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseMaxResponseOutputTokensNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateResponseMaxResponseOutputTokensNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateResponseMaxResponseOutputTokens? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateResponseMaxResponseOutputTokensExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateResponseMaxResponseOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateResponseMaxResponseOutputTokens? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateResponseMaxResponseOutputTokensExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseModalitie.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseModalitie.g.cs new file mode 100644 index 00000000..da8d8af4 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseModalitie.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateResponseModalitieJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateResponseModalitie Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateResponseModalitieExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateResponseModalitie)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateResponseModalitie value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateResponseModalitieExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseModalitieNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseModalitieNullable.g.cs new file mode 100644 index 00000000..256cdcb8 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseModalitieNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateResponseModalitieNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateResponseModalitie? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateResponseModalitieExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateResponseModalitie)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateResponseModalitie? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateResponseModalitieExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseToolType.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseToolType.g.cs new file mode 100644 index 00000000..6730f5b8 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseToolType.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateResponseToolTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateResponseToolType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateResponseToolTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateResponseToolType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateResponseToolType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateResponseToolTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseToolTypeNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseToolTypeNullable.g.cs new file mode 100644 index 00000000..d6e6a76e --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseToolTypeNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateResponseToolTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateResponseToolType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateResponseToolTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateResponseToolType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateResponseToolType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateResponseToolTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseVoice.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseVoice.g.cs new file mode 100644 index 00000000..2ea11c4b --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseVoice.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateResponseVoiceJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateResponseVoice Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateResponseVoiceExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateResponseVoice)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateResponseVoice value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateResponseVoiceExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseVoiceNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseVoiceNullable.g.cs new file mode 100644 index 00000000..1bad5a71 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeSessionCreateResponseVoiceNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeSessionCreateResponseVoiceNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeSessionCreateResponseVoice? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeSessionCreateResponseVoiceExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeSessionCreateResponseVoice)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeSessionCreateResponseVoice? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeSessionCreateResponseVoiceExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonSerializerContext.g.cs b/src/libs/OpenAI/Generated/JsonSerializerContext.g.cs index 8bfb9f9c..7d2700d5 100644 --- a/src/libs/OpenAI/Generated/JsonSerializerContext.g.cs +++ b/src/libs/OpenAI/Generated/JsonSerializerContext.g.cs @@ -251,6 +251,8 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.ChatCompletionRequestAssistantMessageContentPartDiscriminatorTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestAssistantMessageRoleJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestAssistantMessageRoleNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRequestDeveloperMessageRoleJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRequestDeveloperMessageRoleNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestFunctionMessageRoleJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestFunctionMessageRoleNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestSystemMessageRoleJsonConverter), @@ -303,6 +305,8 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.CreateChatCompletionFunctionResponseObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestModelJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestModelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestReasoningEffortJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestReasoningEffortNullableJsonConverter), typeof(global::OpenAI.JsonConverters.PredictionContentTypeJsonConverter), 
typeof(global::OpenAI.JsonConverters.PredictionContentTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestAudioVoiceJsonConverter), @@ -353,6 +357,22 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestHyperparametersNEpochsNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestIntegrationTypeJsonConverter), typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestIntegrationTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneMethodTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneMethodTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneSupervisedMethodHyperparametersBatchSizeJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneSupervisedMethodHyperparametersBatchSizeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneSupervisedMethodHyperparametersLearningRateMultiplierJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneSupervisedMethodHyperparametersLearningRateMultiplierNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneSupervisedMethodHyperparametersNEpochsJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneSupervisedMethodHyperparametersNEpochsNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneDPOMethodHyperparametersBetaJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneDPOMethodHyperparametersBetaNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneDPOMethodHyperparametersBatchSizeJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneDPOMethodHyperparametersBatchSizeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneDPOMethodHyperparametersLearningRateMultiplierJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneDPOMethodHyperparametersLearningRateMultiplierNullableJsonConverter), + 
typeof(global::OpenAI.JsonConverters.FineTuneDPOMethodHyperparametersNEpochsJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuneDPOMethodHyperparametersNEpochsNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateImageEditRequestModelJsonConverter), typeof(global::OpenAI.JsonConverters.CreateImageEditRequestModelNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateImageEditRequestSizeJsonConverter), @@ -463,6 +483,10 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.DeleteVectorStoreResponseObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.FineTuningIntegrationTypeJsonConverter), typeof(global::OpenAI.JsonConverters.FineTuningIntegrationTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobHyperparametersBatchSizeJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobHyperparametersBatchSizeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobHyperparametersLearningRateMultiplierJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobHyperparametersLearningRateMultiplierNullableJsonConverter), typeof(global::OpenAI.JsonConverters.FineTuningJobHyperparametersNEpochsJsonConverter), typeof(global::OpenAI.JsonConverters.FineTuningJobHyperparametersNEpochsNullableJsonConverter), typeof(global::OpenAI.JsonConverters.FineTuningJobObjectJsonConverter), @@ -471,10 +495,12 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.FineTuningJobStatusNullableJsonConverter), typeof(global::OpenAI.JsonConverters.FineTuningJobCheckpointObjectJsonConverter), typeof(global::OpenAI.JsonConverters.FineTuningJobCheckpointObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningJobEventLevelJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningJobEventLevelNullableJsonConverter), typeof(global::OpenAI.JsonConverters.FineTuningJobEventObjectJsonConverter), 
typeof(global::OpenAI.JsonConverters.FineTuningJobEventObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobEventLevelJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobEventLevelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobEventTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobEventTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.InviteObjectJsonConverter), typeof(global::OpenAI.JsonConverters.InviteObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.InviteRoleJsonConverter), @@ -593,20 +619,34 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.RealtimeClientEventResponseCancelTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeClientEventResponseCreateTypeJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeClientEventResponseCreateTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionVoiceJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionVoiceNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeAudioFormatJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeAudioFormatNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionTurnDetectionTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionTurnDetectionTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionToolChoiceJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionToolChoiceNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionMaxOutputTokensJsonConverter), - typeof(global::OpenAI.JsonConverters.RealtimeSessionMaxOutputTokensNullableJsonConverter), + 
typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsModalitieJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsModalitieNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsVoiceJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsVoiceNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsOutputAudioFormatJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsOutputAudioFormatNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsToolTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsToolTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsMaxResponseOutputTokensJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsMaxResponseOutputTokensNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsConversationJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsConversationNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeClientEventSessionUpdateTypeJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeClientEventSessionUpdateTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestModalitieJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestModalitieNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestModelJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestModelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestVoiceJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestVoiceNullableJsonConverter), + 
typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestInputAudioFormatJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestInputAudioFormatNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestOutputAudioFormatJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestOutputAudioFormatNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestToolTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestToolTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestMaxResponseOutputTokensJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestMaxResponseOutputTokensNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeResponseObjectJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeResponseObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeResponseStatusJsonConverter), @@ -635,6 +675,8 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.RealtimeServerEventInputAudioBufferSpeechStoppedTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventRateLimitsUpdatedTypeJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventRateLimitsUpdatedTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeServerEventRateLimitsUpdatedRateLimitNameJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeServerEventRateLimitsUpdatedRateLimitNameNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventResponseAudioDeltaTypeJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventResponseAudioDeltaTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventResponseAudioDoneTypeJsonConverter), @@ -649,6 +691,8 @@ namespace OpenAI 
typeof(global::OpenAI.JsonConverters.RealtimeServerEventResponseContentPartAddedPartTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventResponseContentPartDoneTypeJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventResponseContentPartDoneTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeServerEventResponseContentPartDonePartTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeServerEventResponseContentPartDonePartTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventResponseCreatedTypeJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventResponseCreatedTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventResponseDoneTypeJsonConverter), @@ -667,8 +711,28 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.RealtimeServerEventResponseTextDoneTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventSessionCreatedTypeJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventSessionCreatedTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionVoiceJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionVoiceNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeAudioFormatJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeAudioFormatNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionTurnDetectionTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionTurnDetectionTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionToolChoiceJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionToolChoiceNullableJsonConverter), + 
typeof(global::OpenAI.JsonConverters.RealtimeSessionMaxOutputTokensJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionMaxOutputTokensNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventSessionUpdatedTypeJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventSessionUpdatedTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateResponseModalitieJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateResponseModalitieNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateResponseVoiceJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateResponseVoiceNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateResponseToolTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateResponseToolTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateResponseMaxResponseOutputTokensJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateResponseMaxResponseOutputTokensNullableJsonConverter), typeof(global::OpenAI.JsonConverters.UploadStatusJsonConverter), typeof(global::OpenAI.JsonConverters.UploadStatusNullableJsonConverter), typeof(global::OpenAI.JsonConverters.UploadObjectJsonConverter), @@ -912,6 +976,7 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.ResultItemJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventJsonConverter), typeof(global::OpenAI.JsonConverters.OneOfJsonConverter>), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter>), typeof(global::OpenAI.JsonConverters.OneOfJsonConverter>), typeof(global::OpenAI.JsonConverters.OneOfJsonConverter>), typeof(global::OpenAI.JsonConverters.OneOfJsonConverter>), @@ -929,6 +994,13 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), 
typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), typeof(global::OpenAI.JsonConverters.AnyOfJsonConverter), typeof(global::OpenAI.JsonConverters.AnyOfJsonConverter), typeof(global::OpenAI.JsonConverters.AnyOfJsonConverter), @@ -940,10 +1012,19 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.AnyOfJsonConverter), typeof(global::OpenAI.JsonConverters.AnyOfJsonConverter), typeof(global::OpenAI.JsonConverters.AnyOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), - typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), typeof(global::OpenAI.JsonConverters.OneOfJsonConverter), diff --git a/src/libs/OpenAI/Generated/JsonSerializerContextTypes.g.cs 
b/src/libs/OpenAI/Generated/JsonSerializerContextTypes.g.cs index d9ccb0d8..d85d4050 100644 --- a/src/libs/OpenAI/Generated/JsonSerializerContextTypes.g.cs +++ b/src/libs/OpenAI/Generated/JsonSerializerContextTypes.g.cs @@ -1442,3130 +1442,3418 @@ public sealed partial class JsonSerializerContextTypes /// /// /// - public global::OpenAI.ChatCompletionRequestFunctionMessage? Type356 { get; set; } + public global::OpenAI.ChatCompletionRequestDeveloperMessage? Type356 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestFunctionMessageRole? Type357 { get; set; } + public global::OpenAI.OneOf>? Type357 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessage? Type358 { get; set; } + public global::System.Collections.Generic.IList? Type358 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestSystemMessage? Type359 { get; set; } + public global::OpenAI.ChatCompletionRequestDeveloperMessageRole? Type359 { get; set; } /// /// /// - public global::OpenAI.OneOf>? Type360 { get; set; } + public global::OpenAI.ChatCompletionRequestFunctionMessage? Type360 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type361 { get; set; } + public global::OpenAI.ChatCompletionRequestFunctionMessageRole? Type361 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestSystemMessageContentPart? Type362 { get; set; } + public global::OpenAI.ChatCompletionRequestMessage? Type362 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestSystemMessageRole? Type363 { get; set; } + public global::OpenAI.ChatCompletionRequestSystemMessage? Type363 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestUserMessage? Type364 { get; set; } + public global::OpenAI.OneOf>? Type364 { get; set; } /// /// /// - public global::OpenAI.OneOf>? Type365 { get; set; } + public global::System.Collections.Generic.IList? 
Type365 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type366 { get; set; } + public global::OpenAI.ChatCompletionRequestSystemMessageContentPart? Type366 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestUserMessageContentPart? Type367 { get; set; } + public global::OpenAI.ChatCompletionRequestSystemMessageRole? Type367 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartImage? Type368 { get; set; } + public global::OpenAI.ChatCompletionRequestUserMessage? Type368 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartImageType? Type369 { get; set; } + public global::OpenAI.OneOf>? Type369 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartImageImageUrl? Type370 { get; set; } + public global::System.Collections.Generic.IList? Type370 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartImageImageUrlDetail? Type371 { get; set; } + public global::OpenAI.ChatCompletionRequestUserMessageContentPart? Type371 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartAudio? Type372 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartImage? Type372 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartAudioType? Type373 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartImageType? Type373 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudio? Type374 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartImageImageUrl? Type374 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudioFormat? Type375 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartImageImageUrlDetail? 
Type375 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestUserMessageContentPartDiscriminator? Type376 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartAudio? Type376 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestUserMessageContentPartDiscriminatorType? Type377 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartAudioType? Type377 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestUserMessageRole? Type378 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudio? Type378 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestToolMessage? Type379 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudioFormat? Type379 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestToolMessageRole? Type380 { get; set; } + public global::OpenAI.ChatCompletionRequestUserMessageContentPartDiscriminator? Type380 { get; set; } /// /// /// - public global::OpenAI.OneOf>? Type381 { get; set; } + public global::OpenAI.ChatCompletionRequestUserMessageContentPartDiscriminatorType? Type381 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type382 { get; set; } + public global::OpenAI.ChatCompletionRequestUserMessageRole? Type382 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestToolMessageContentPart? Type383 { get; set; } + public global::OpenAI.ChatCompletionRequestToolMessage? Type383 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageDiscriminator? Type384 { get; set; } + public global::OpenAI.ChatCompletionRequestToolMessageRole? Type384 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageDiscriminatorRole? Type385 { get; set; } + public global::OpenAI.OneOf>? Type385 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionResponseMessage? 
Type386 { get; set; } + public global::System.Collections.Generic.IList? Type386 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionResponseMessageRole? Type387 { get; set; } + public global::OpenAI.ChatCompletionRequestToolMessageContentPart? Type387 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionResponseMessageFunctionCall? Type388 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageDiscriminator? Type388 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionResponseMessageAudio? Type389 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageDiscriminatorRole? Type389 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRole? Type390 { get; set; } + public global::OpenAI.ChatCompletionResponseMessage? Type390 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionStreamOptions? Type391 { get; set; } + public global::OpenAI.ChatCompletionResponseMessageRole? Type391 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionStreamResponseDelta? Type392 { get; set; } + public global::OpenAI.ChatCompletionResponseMessageFunctionCall? Type392 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionStreamResponseDeltaFunctionCall? Type393 { get; set; } + public global::OpenAI.ChatCompletionResponseMessageAudio? Type393 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type394 { get; set; } + public global::OpenAI.ChatCompletionRole? Type394 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionStreamResponseDeltaRole? Type395 { get; set; } + public global::OpenAI.ChatCompletionStreamOptions? Type395 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionTokenLogprob? Type396 { get; set; } + public global::OpenAI.ChatCompletionStreamResponseDelta? Type396 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type397 { get; set; } + public global::OpenAI.ChatCompletionStreamResponseDeltaFunctionCall? 
Type397 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type398 { get; set; } + public global::System.Collections.Generic.IList? Type398 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionTokenLogprobTopLogprob? Type399 { get; set; } + public global::OpenAI.ChatCompletionStreamResponseDeltaRole? Type399 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionTool? Type400 { get; set; } + public global::OpenAI.ChatCompletionTokenLogprob? Type400 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionToolType? Type401 { get; set; } + public global::System.Collections.Generic.IList? Type401 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionToolChoiceOption? Type402 { get; set; } + public global::System.Collections.Generic.IList? Type402 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionToolChoiceOptionEnum? Type403 { get; set; } + public global::OpenAI.ChatCompletionTokenLogprobTopLogprob? Type403 { get; set; } /// /// /// - public global::OpenAI.ChunkingStrategyRequestParam? Type404 { get; set; } + public global::OpenAI.ChatCompletionTool? Type404 { get; set; } /// /// /// - public global::OpenAI.StaticChunkingStrategyRequestParam? Type405 { get; set; } + public global::OpenAI.ChatCompletionToolType? Type405 { get; set; } /// /// /// - public global::OpenAI.StaticChunkingStrategyRequestParamType? Type406 { get; set; } + public global::OpenAI.ChatCompletionToolChoiceOption? Type406 { get; set; } /// /// /// - public global::OpenAI.StaticChunkingStrategy? Type407 { get; set; } + public global::OpenAI.ChatCompletionToolChoiceOptionEnum? Type407 { get; set; } /// /// /// - public global::OpenAI.ChunkingStrategyRequestParamDiscriminator? Type408 { get; set; } + public global::OpenAI.ChunkingStrategyRequestParam? Type408 { get; set; } /// /// /// - public global::OpenAI.ChunkingStrategyRequestParamDiscriminatorType? 
Type409 { get; set; } + public global::OpenAI.StaticChunkingStrategyRequestParam? Type409 { get; set; } /// /// /// - public global::OpenAI.CompleteUploadRequest? Type410 { get; set; } + public global::OpenAI.StaticChunkingStrategyRequestParamType? Type410 { get; set; } /// /// /// - public global::OpenAI.CompletionUsage? Type411 { get; set; } + public global::OpenAI.StaticChunkingStrategy? Type411 { get; set; } /// /// /// - public global::OpenAI.CompletionUsageCompletionTokensDetails? Type412 { get; set; } + public global::OpenAI.ChunkingStrategyRequestParamDiscriminator? Type412 { get; set; } /// /// /// - public global::OpenAI.CompletionUsagePromptTokensDetails? Type413 { get; set; } + public global::OpenAI.ChunkingStrategyRequestParamDiscriminatorType? Type413 { get; set; } /// /// /// - public global::OpenAI.CostsResult? Type414 { get; set; } + public global::OpenAI.CompleteUploadRequest? Type414 { get; set; } /// /// /// - public global::OpenAI.CostsResultObject? Type415 { get; set; } + public global::OpenAI.CompletionUsage? Type415 { get; set; } /// /// /// - public global::OpenAI.CostsResultAmount? Type416 { get; set; } + public global::OpenAI.CompletionUsageCompletionTokensDetails? Type416 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequest? Type417 { get; set; } + public global::OpenAI.CompletionUsagePromptTokensDetails? Type417 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type418 { get; set; } + public global::OpenAI.CostsResult? Type418 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestModel? Type419 { get; set; } + public global::OpenAI.CostsResultObject? Type419 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type420 { get; set; } + public global::OpenAI.CostsResultAmount? Type420 { get; set; } /// /// /// - public global::OpenAI.ToolsItem2? Type421 { get; set; } + public global::OpenAI.CreateAssistantRequest? 
Type421 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolDiscriminator? Type422 { get; set; } + public global::OpenAI.AnyOf? Type422 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolDiscriminatorType? Type423 { get; set; } + public global::OpenAI.CreateAssistantRequestModel? Type423 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResources? Type424 { get; set; } + public global::System.Collections.Generic.IList? Type424 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesCodeInterpreter? Type425 { get; set; } + public global::OpenAI.ToolsItem2? Type425 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearch? Type426 { get; set; } + public global::OpenAI.CreateAssistantRequestToolDiscriminator? Type426 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type427 { get; set; } + public global::OpenAI.CreateAssistantRequestToolDiscriminatorType? Type427 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStore? Type428 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResources? Type428 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategy? Type429 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesCodeInterpreter? Type429 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1? Type430 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearch? Type430 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type? Type431 { get; set; } + public global::System.Collections.Generic.IList? 
Type431 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2? Type432 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStore? Type432 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type? Type433 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategy? Type433 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static? Type434 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1? Type434 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator? Type435 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type? Type435 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType? Type436 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2? Type436 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionFunctionResponse? Type437 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type? Type437 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type438 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static? Type438 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionFunctionResponseChoice? 
Type439 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator? Type439 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionFunctionResponseChoiceFinishReason? Type440 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType? Type440 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionFunctionResponseObject? Type441 { get; set; } + public global::OpenAI.CreateChatCompletionFunctionResponse? Type441 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequest? Type442 { get; set; } + public global::System.Collections.Generic.IList? Type442 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type443 { get; set; } + public global::OpenAI.CreateChatCompletionFunctionResponseChoice? Type443 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type444 { get; set; } + public global::OpenAI.CreateChatCompletionFunctionResponseChoiceFinishReason? Type444 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestModel? Type445 { get; set; } + public global::OpenAI.CreateChatCompletionFunctionResponseObject? Type445 { get; set; } /// /// /// - public global::System.Collections.Generic.Dictionary? Type446 { get; set; } + public global::OpenAI.CreateChatCompletionRequest? Type446 { get; set; } /// /// /// - public global::System.Collections.Generic.Dictionary? Type447 { get; set; } + public global::System.Collections.Generic.IList? Type447 { get; set; } /// /// /// - public global::OpenAI.PredictionContent? Type448 { get; set; } + public global::OpenAI.AnyOf? Type448 { get; set; } /// /// /// - public global::OpenAI.PredictionContentType? Type449 { get; set; } + public global::OpenAI.CreateChatCompletionRequestModel? Type449 { get; set; } /// /// /// - public global::OpenAI.OneOf>? 
Type450 { get; set; } + public global::OpenAI.CreateChatCompletionRequestReasoningEffort? Type450 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type451 { get; set; } + public global::System.Collections.Generic.Dictionary? Type451 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestAudio? Type452 { get; set; } + public global::System.Collections.Generic.Dictionary? Type452 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestAudioVoice? Type453 { get; set; } + public global::OpenAI.PredictionContent? Type453 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestAudioFormat? Type454 { get; set; } + public global::OpenAI.PredictionContentType? Type454 { get; set; } /// /// /// - public global::OpenAI.ResponseFormat? Type455 { get; set; } + public global::OpenAI.CreateChatCompletionRequestAudio? Type455 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestResponseFormatDiscriminator? Type456 { get; set; } + public global::OpenAI.CreateChatCompletionRequestAudioVoice? Type456 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestResponseFormatDiscriminatorType? Type457 { get; set; } + public global::OpenAI.CreateChatCompletionRequestAudioFormat? Type457 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestServiceTier? Type458 { get; set; } + public global::OpenAI.ResponseFormat? Type458 { get; set; } /// /// /// - public global::OpenAI.OneOf>? Type459 { get; set; } + public global::OpenAI.CreateChatCompletionRequestResponseFormatDiscriminator? Type459 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type460 { get; set; } + public global::OpenAI.CreateChatCompletionRequestResponseFormatDiscriminatorType? Type460 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type461 { get; set; } + public global::OpenAI.CreateChatCompletionRequestServiceTier? 
Type461 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestFunctionCall? Type462 { get; set; } + public global::OpenAI.OneOf>? Type462 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type463 { get; set; } + public global::System.Collections.Generic.IList? Type463 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponse? Type464 { get; set; } + public global::OpenAI.OneOf? Type464 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type465 { get; set; } + public global::OpenAI.CreateChatCompletionRequestFunctionCall? Type465 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseChoice? Type466 { get; set; } + public global::System.Collections.Generic.IList? Type466 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseChoiceFinishReason? Type467 { get; set; } + public global::OpenAI.CreateChatCompletionResponse? Type467 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseChoiceLogprobs? Type468 { get; set; } + public global::System.Collections.Generic.IList? Type468 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type469 { get; set; } + public global::OpenAI.CreateChatCompletionResponseChoice? Type469 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseServiceTier? Type470 { get; set; } + public global::OpenAI.CreateChatCompletionResponseChoiceFinishReason? Type470 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseObject? Type471 { get; set; } + public global::OpenAI.CreateChatCompletionResponseChoiceLogprobs? Type471 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponse? Type472 { get; set; } + public global::System.Collections.Generic.IList? Type472 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? 
Type473 { get; set; } + public global::OpenAI.CreateChatCompletionResponseServiceTier? Type473 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseChoice? Type474 { get; set; } + public global::OpenAI.CreateChatCompletionResponseObject? Type474 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseChoiceLogprobs? Type475 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponse? Type475 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseChoiceFinishReason? Type476 { get; set; } + public global::System.Collections.Generic.IList? Type476 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseServiceTier? Type477 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseChoice? Type477 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseObject? Type478 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseChoiceLogprobs? Type478 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseUsage? Type479 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseChoiceFinishReason? Type479 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionRequest? Type480 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseServiceTier? Type480 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type481 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseObject? Type481 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionRequestModel? Type482 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseUsage? Type482 { get; set; } /// /// /// - public global::OpenAI.OneOf, global::System.Collections.Generic.IList, global::System.Collections.Generic.IList>>? Type483 { get; set; } + public global::OpenAI.CreateCompletionRequest? 
Type483 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type484 { get; set; } + public global::OpenAI.AnyOf? Type484 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponse? Type485 { get; set; } + public global::OpenAI.CreateCompletionRequestModel? Type485 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type486 { get; set; } + public global::OpenAI.OneOf, global::System.Collections.Generic.IList, global::System.Collections.Generic.IList>>? Type486 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponseChoice? Type487 { get; set; } + public global::System.Collections.Generic.IList>? Type487 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponseChoiceFinishReason? Type488 { get; set; } + public global::OpenAI.CreateCompletionResponse? Type488 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponseChoiceLogprobs? Type489 { get; set; } + public global::System.Collections.Generic.IList? Type489 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type490 { get; set; } + public global::OpenAI.CreateCompletionResponseChoice? Type490 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type491 { get; set; } + public global::OpenAI.CreateCompletionResponseChoiceFinishReason? Type491 { get; set; } /// /// /// - public global::System.Collections.Generic.Dictionary? Type492 { get; set; } + public global::OpenAI.CreateCompletionResponseChoiceLogprobs? Type492 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponseObject? Type493 { get; set; } + public global::System.Collections.Generic.IList? Type493 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingRequest? Type494 { get; set; } + public global::System.Collections.Generic.IList>? Type494 { get; set; } /// /// /// - public global::OpenAI.AnyOf? 
Type495 { get; set; } + public global::System.Collections.Generic.Dictionary? Type495 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingRequestModel? Type496 { get; set; } + public global::OpenAI.CreateCompletionResponseObject? Type496 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingRequestEncodingFormat? Type497 { get; set; } + public global::OpenAI.CreateEmbeddingRequest? Type497 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingResponse? Type498 { get; set; } + public global::OpenAI.AnyOf? Type498 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type499 { get; set; } + public global::OpenAI.CreateEmbeddingRequestModel? Type499 { get; set; } /// /// /// - public global::OpenAI.Embedding? Type500 { get; set; } + public global::OpenAI.CreateEmbeddingRequestEncodingFormat? Type500 { get; set; } /// /// /// - public global::OpenAI.EmbeddingObject? Type501 { get; set; } + public global::OpenAI.CreateEmbeddingResponse? Type501 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingResponseObject? Type502 { get; set; } + public global::System.Collections.Generic.IList? Type502 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingResponseUsage? Type503 { get; set; } + public global::OpenAI.Embedding? Type503 { get; set; } /// /// /// - public global::OpenAI.CreateFileRequest? Type504 { get; set; } + public global::OpenAI.EmbeddingObject? Type504 { get; set; } /// /// /// - public global::OpenAI.CreateFileRequestPurpose? Type505 { get; set; } + public global::OpenAI.CreateEmbeddingResponseObject? Type505 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequest? Type506 { get; set; } + public global::OpenAI.CreateEmbeddingResponseUsage? Type506 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type507 { get; set; } + public global::OpenAI.CreateFileRequest? Type507 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestModel? 
Type508 { get; set; } + public global::OpenAI.CreateFileRequestPurpose? Type508 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestHyperparameters? Type509 { get; set; } + public global::OpenAI.CreateFineTuningJobRequest? Type509 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type510 { get; set; } + public global::OpenAI.AnyOf? Type510 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestHyperparametersBatchSize? Type511 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestModel? Type511 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type512 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestHyperparameters? Type512 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestHyperparametersLearningRateMultiplier? Type513 { get; set; } + public global::OpenAI.OneOf? Type513 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type514 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestHyperparametersBatchSize? Type514 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestHyperparametersNEpochs? Type515 { get; set; } + public global::OpenAI.OneOf? Type515 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type516 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestHyperparametersLearningRateMultiplier? Type516 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestIntegration? Type517 { get; set; } + public global::OpenAI.OneOf? Type517 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestIntegrationType? Type518 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestHyperparametersNEpochs? Type518 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestIntegrationWandb? Type519 { get; set; } + public global::System.Collections.Generic.IList? 
Type519 { get; set; } /// /// /// - public global::OpenAI.CreateImageEditRequest? Type520 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestIntegration? Type520 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type521 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestIntegrationType? Type521 { get; set; } /// /// /// - public global::OpenAI.CreateImageEditRequestModel? Type522 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestIntegrationWandb? Type522 { get; set; } /// /// /// - public global::OpenAI.CreateImageEditRequestSize? Type523 { get; set; } + public global::OpenAI.FineTuneMethod? Type523 { get; set; } /// /// /// - public global::OpenAI.CreateImageEditRequestResponseFormat? Type524 { get; set; } + public global::OpenAI.FineTuneMethodType? Type524 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequest? Type525 { get; set; } + public global::OpenAI.FineTuneSupervisedMethod? Type525 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type526 { get; set; } + public global::OpenAI.FineTuneSupervisedMethodHyperparameters? Type526 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestModel? Type527 { get; set; } + public global::OpenAI.OneOf? Type527 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestQuality? Type528 { get; set; } + public global::OpenAI.FineTuneSupervisedMethodHyperparametersBatchSize? Type528 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestResponseFormat? Type529 { get; set; } + public global::OpenAI.OneOf? Type529 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestSize? Type530 { get; set; } + public global::OpenAI.FineTuneSupervisedMethodHyperparametersLearningRateMultiplier? Type530 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestStyle? Type531 { get; set; } + public global::OpenAI.OneOf? Type531 { get; set; } /// /// /// - public global::OpenAI.CreateImageVariationRequest? 
Type532 { get; set; } + public global::OpenAI.FineTuneSupervisedMethodHyperparametersNEpochs? Type532 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type533 { get; set; } + public global::OpenAI.FineTuneDPOMethod? Type533 { get; set; } /// /// /// - public global::OpenAI.CreateImageVariationRequestModel? Type534 { get; set; } + public global::OpenAI.FineTuneDPOMethodHyperparameters? Type534 { get; set; } /// /// /// - public global::OpenAI.CreateImageVariationRequestResponseFormat? Type535 { get; set; } + public global::OpenAI.OneOf? Type535 { get; set; } /// /// /// - public global::OpenAI.CreateImageVariationRequestSize? Type536 { get; set; } + public global::OpenAI.FineTuneDPOMethodHyperparametersBeta? Type536 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequest? Type537 { get; set; } + public global::OpenAI.OneOf? Type537 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequestRole? Type538 { get; set; } + public global::OpenAI.FineTuneDPOMethodHyperparametersBatchSize? Type538 { get; set; } /// /// /// - public global::OpenAI.OneOf>? Type539 { get; set; } + public global::OpenAI.OneOf? Type539 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type540 { get; set; } + public global::OpenAI.FineTuneDPOMethodHyperparametersLearningRateMultiplier? Type540 { get; set; } /// /// /// - public global::OpenAI.ContentVariant2Item? Type541 { get; set; } + public global::OpenAI.OneOf? Type541 { get; set; } /// /// /// - public global::OpenAI.MessageRequestContentTextObject? Type542 { get; set; } + public global::OpenAI.FineTuneDPOMethodHyperparametersNEpochs? Type542 { get; set; } /// /// /// - public global::OpenAI.MessageRequestContentTextObjectType? Type543 { get; set; } + public global::OpenAI.CreateImageEditRequest? Type543 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequestContentVariant2ItemDiscriminator? Type544 { get; set; } + public global::OpenAI.AnyOf? 
Type544 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequestContentVariant2ItemDiscriminatorType? Type545 { get; set; } + public global::OpenAI.CreateImageEditRequestModel? Type545 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type546 { get; set; } + public global::OpenAI.CreateImageEditRequestSize? Type546 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequestAttachment? Type547 { get; set; } + public global::OpenAI.CreateImageEditRequestResponseFormat? Type547 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type548 { get; set; } + public global::OpenAI.CreateImageRequest? Type548 { get; set; } /// /// /// - public global::OpenAI.ToolsItem3? Type549 { get; set; } + public global::OpenAI.AnyOf? Type549 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequestAttachmentToolDiscriminator? Type550 { get; set; } + public global::OpenAI.CreateImageRequestModel? Type550 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequestAttachmentToolDiscriminatorType? Type551 { get; set; } + public global::OpenAI.CreateImageRequestQuality? Type551 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequest? Type552 { get; set; } + public global::OpenAI.CreateImageRequestResponseFormat? Type552 { get; set; } /// /// /// - public global::OpenAI.OneOf, global::System.Collections.Generic.IList>? Type553 { get; set; } + public global::OpenAI.CreateImageRequestSize? Type553 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type554 { get; set; } + public global::OpenAI.CreateImageRequestStyle? Type554 { get; set; } /// /// /// - public global::OpenAI.InputVariant3Item? Type555 { get; set; } + public global::OpenAI.CreateImageVariationRequest? Type555 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1? Type556 { get; set; } + public global::OpenAI.AnyOf? 
Type556 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1Type? Type557 { get; set; } + public global::OpenAI.CreateImageVariationRequestModel? Type557 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1ImageUrl? Type558 { get; set; } + public global::OpenAI.CreateImageVariationRequestResponseFormat? Type558 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant2? Type559 { get; set; } + public global::OpenAI.CreateImageVariationRequestSize? Type559 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant2Type? Type560 { get; set; } + public global::OpenAI.CreateMessageRequest? Type560 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemDiscriminator? Type561 { get; set; } + public global::OpenAI.CreateMessageRequestRole? Type561 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemDiscriminatorType? Type562 { get; set; } + public global::OpenAI.OneOf>? Type562 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type563 { get; set; } + public global::System.Collections.Generic.IList? Type563 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestModel? Type564 { get; set; } + public global::OpenAI.ContentVariant2Item? Type564 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponse? Type565 { get; set; } + public global::OpenAI.MessageRequestContentTextObject? Type565 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type566 { get; set; } + public global::OpenAI.MessageRequestContentTextObjectType? Type566 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResult? Type567 { get; set; } + public global::OpenAI.CreateMessageRequestContentVariant2ItemDiscriminator? 
Type567 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategories? Type568 { get; set; } + public global::OpenAI.CreateMessageRequestContentVariant2ItemDiscriminatorType? Type568 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryScores? Type569 { get; set; } + public global::System.Collections.Generic.IList? Type569 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypes? Type570 { get; set; } + public global::OpenAI.CreateMessageRequestAttachment? Type570 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type571 { get; set; } + public global::System.Collections.Generic.IList? Type571 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHateItem? Type572 { get; set; } + public global::OpenAI.ToolsItem3? Type572 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type573 { get; set; } + public global::OpenAI.CreateMessageRequestAttachmentToolDiscriminator? Type573 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem? Type574 { get; set; } + public global::OpenAI.CreateMessageRequestAttachmentToolDiscriminatorType? Type574 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type575 { get; set; } + public global::OpenAI.CreateModerationRequest? Type575 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem? Type576 { get; set; } + public global::OpenAI.OneOf, global::System.Collections.Generic.IList>? Type576 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type577 { get; set; } + public global::System.Collections.Generic.IList? 
Type577 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem? Type578 { get; set; } + public global::OpenAI.InputVariant3Item? Type578 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type579 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1? Type579 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem? Type580 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1Type? Type580 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type581 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1ImageUrl? Type581 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem? Type582 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant2? Type582 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type583 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant2Type? Type583 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem? Type584 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemDiscriminator? Type584 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type585 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemDiscriminatorType? Type585 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem? Type586 { get; set; } + public global::OpenAI.AnyOf? Type586 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type587 { get; set; } + public global::OpenAI.CreateModerationRequestModel? 
Type587 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction? Type588 { get; set; } + public global::OpenAI.CreateModerationResponse? Type588 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type589 { get; set; } + public global::System.Collections.Generic.IList? Type589 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem? Type590 { get; set; } + public global::OpenAI.CreateModerationResponseResult? Type590 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type591 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategories? Type591 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor? Type592 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryScores? Type592 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type593 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypes? Type593 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem? Type594 { get; set; } + public global::System.Collections.Generic.IList? Type594 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type595 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHateItem? Type595 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem? Type596 { get; set; } + public global::System.Collections.Generic.IList? Type596 { get; set; } /// /// /// - public global::OpenAI.CreateRunRequest? Type597 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem? 
Type597 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type598 { get; set; } + public global::System.Collections.Generic.IList? Type598 { get; set; } /// /// /// - public global::OpenAI.CreateRunRequestModel? Type599 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem? Type599 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type600 { get; set; } + public global::System.Collections.Generic.IList? Type600 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type601 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem? Type601 { get; set; } /// /// /// - public global::OpenAI.ToolsItem4? Type602 { get; set; } + public global::System.Collections.Generic.IList? Type602 { get; set; } /// /// /// - public global::OpenAI.CreateRunRequestToolDiscriminator? Type603 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem? Type603 { get; set; } /// /// /// - public global::OpenAI.CreateRunRequestToolDiscriminatorType? Type604 { get; set; } + public global::System.Collections.Generic.IList? Type604 { get; set; } /// /// /// - public global::OpenAI.CreateSpeechRequest? Type605 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem? Type605 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type606 { get; set; } + public global::System.Collections.Generic.IList? Type606 { get; set; } /// /// /// - public global::OpenAI.CreateSpeechRequestModel? Type607 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem? Type607 { get; set; } /// /// /// - public global::OpenAI.CreateSpeechRequestVoice? Type608 { get; set; } + public global::System.Collections.Generic.IList? 
Type608 { get; set; } /// /// /// - public global::OpenAI.CreateSpeechRequestResponseFormat? Type609 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem? Type609 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequest? Type610 { get; set; } + public global::System.Collections.Generic.IList? Type610 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequest? Type611 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction? Type611 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResources? Type612 { get; set; } + public global::System.Collections.Generic.IList? Type612 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesCodeInterpreter? Type613 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem? Type613 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearch? Type614 { get; set; } + public global::System.Collections.Generic.IList? Type614 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type615 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor? Type615 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStore? Type616 { get; set; } + public global::System.Collections.Generic.IList? Type616 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategy? Type617 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem? Type617 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1? Type618 { get; set; } + public global::System.Collections.Generic.IList? 
Type618 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type? Type619 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem? Type619 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2? Type620 { get; set; } + public global::OpenAI.CreateRunRequest? Type620 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type? Type621 { get; set; } + public global::OpenAI.AnyOf? Type621 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static? Type622 { get; set; } + public global::OpenAI.CreateRunRequestModel? Type622 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator? Type623 { get; set; } + public global::System.Collections.Generic.IList? Type623 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType? Type624 { get; set; } + public global::System.Collections.Generic.IList? Type624 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type625 { get; set; } + public global::OpenAI.ToolsItem4? Type625 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestModel? Type626 { get; set; } + public global::OpenAI.CreateRunRequestToolDiscriminator? Type626 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type627 { get; set; } + public global::OpenAI.CreateRunRequestToolDiscriminatorType? Type627 { get; set; } /// /// /// - public global::OpenAI.ToolsItem5? Type628 { get; set; } + public global::OpenAI.CreateSpeechRequest? 
Type628 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestToolDiscriminator? Type629 { get; set; } + public global::OpenAI.AnyOf? Type629 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestToolDiscriminatorType? Type630 { get; set; } + public global::OpenAI.CreateSpeechRequestModel? Type630 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestToolResources? Type631 { get; set; } + public global::OpenAI.CreateSpeechRequestVoice? Type631 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestToolResourcesCodeInterpreter? Type632 { get; set; } + public global::OpenAI.CreateSpeechRequestResponseFormat? Type632 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestToolResourcesFileSearch? Type633 { get; set; } + public global::OpenAI.CreateThreadAndRunRequest? Type633 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionRequest? Type634 { get; set; } + public global::OpenAI.CreateThreadRequest? Type634 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type635 { get; set; } + public global::OpenAI.CreateThreadRequestToolResources? Type635 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionRequestModel? Type636 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesCodeInterpreter? Type636 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type637 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearch? Type637 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionRequestTimestampGranularitie? Type638 { get; set; } + public global::System.Collections.Generic.IList? Type638 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionResponseJson? Type639 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStore? 
Type639 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionResponseVerboseJson? Type640 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategy? Type640 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type641 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1? Type641 { get; set; } /// /// /// - public global::OpenAI.TranscriptionWord? Type642 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type? Type642 { get; set; } /// /// /// - public float? Type643 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2? Type643 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type644 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type? Type644 { get; set; } /// /// /// - public global::OpenAI.TranscriptionSegment? Type645 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static? Type645 { get; set; } /// /// /// - public global::OpenAI.CreateTranslationRequest? Type646 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator? Type646 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type647 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType? Type647 { get; set; } /// /// /// - public global::OpenAI.CreateTranslationRequestModel? Type648 { get; set; } + public global::OpenAI.AnyOf? Type648 { get; set; } /// /// /// - public global::OpenAI.CreateTranslationResponseJson? Type649 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestModel? 
Type649 { get; set; } /// /// /// - public global::OpenAI.CreateTranslationResponseVerboseJson? Type650 { get; set; } + public global::System.Collections.Generic.IList? Type650 { get; set; } /// /// /// - public global::OpenAI.CreateUploadRequest? Type651 { get; set; } + public global::OpenAI.ToolsItem5? Type651 { get; set; } /// /// /// - public global::OpenAI.CreateUploadRequestPurpose? Type652 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestToolDiscriminator? Type652 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreFileBatchRequest? Type653 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestToolDiscriminatorType? Type653 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreFileRequest? Type654 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestToolResources? Type654 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreRequest? Type655 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestToolResourcesCodeInterpreter? Type655 { get; set; } /// /// /// - public global::OpenAI.VectorStoreExpirationAfter? Type656 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestToolResourcesFileSearch? Type656 { get; set; } /// /// /// - public global::OpenAI.VectorStoreExpirationAfterAnchor? Type657 { get; set; } + public global::OpenAI.CreateTranscriptionRequest? Type657 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreRequestChunkingStrategy? Type658 { get; set; } + public global::OpenAI.AnyOf? Type658 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreRequestChunkingStrategyDiscriminator? Type659 { get; set; } + public global::OpenAI.CreateTranscriptionRequestModel? Type659 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreRequestChunkingStrategyDiscriminatorType? Type660 { get; set; } + public global::System.Collections.Generic.IList? Type660 { get; set; } /// /// /// - public global::OpenAI.DefaultProjectErrorResponse? 
Type661 { get; set; } + public global::OpenAI.CreateTranscriptionRequestTimestampGranularitie? Type661 { get; set; } /// /// /// - public global::OpenAI.DeleteAssistantResponse? Type662 { get; set; } + public global::OpenAI.CreateTranscriptionResponseJson? Type662 { get; set; } /// /// /// - public global::OpenAI.DeleteAssistantResponseObject? Type663 { get; set; } + public global::OpenAI.CreateTranscriptionResponseVerboseJson? Type663 { get; set; } /// /// /// - public global::OpenAI.DeleteFileResponse? Type664 { get; set; } + public global::System.Collections.Generic.IList? Type664 { get; set; } /// /// /// - public global::OpenAI.DeleteFileResponseObject? Type665 { get; set; } + public global::OpenAI.TranscriptionWord? Type665 { get; set; } /// /// /// - public global::OpenAI.DeleteMessageResponse? Type666 { get; set; } + public float? Type666 { get; set; } /// /// /// - public global::OpenAI.DeleteMessageResponseObject? Type667 { get; set; } + public global::System.Collections.Generic.IList? Type667 { get; set; } /// /// /// - public global::OpenAI.DeleteModelResponse? Type668 { get; set; } + public global::OpenAI.TranscriptionSegment? Type668 { get; set; } /// /// /// - public global::OpenAI.DeleteThreadResponse? Type669 { get; set; } + public global::OpenAI.CreateTranslationRequest? Type669 { get; set; } /// /// /// - public global::OpenAI.DeleteThreadResponseObject? Type670 { get; set; } + public global::OpenAI.AnyOf? Type670 { get; set; } /// /// /// - public global::OpenAI.DeleteVectorStoreFileResponse? Type671 { get; set; } + public global::OpenAI.CreateTranslationRequestModel? Type671 { get; set; } /// /// /// - public global::OpenAI.DeleteVectorStoreFileResponseObject? Type672 { get; set; } + public global::OpenAI.CreateTranslationResponseJson? Type672 { get; set; } /// /// /// - public global::OpenAI.DeleteVectorStoreResponse? Type673 { get; set; } + public global::OpenAI.CreateTranslationResponseVerboseJson? 
Type673 { get; set; } /// /// /// - public global::OpenAI.DeleteVectorStoreResponseObject? Type674 { get; set; } + public global::OpenAI.CreateUploadRequest? Type674 { get; set; } /// /// /// - public global::OpenAI.ErrorResponse? Type675 { get; set; } + public global::OpenAI.CreateUploadRequestPurpose? Type675 { get; set; } /// /// /// - public global::OpenAI.FineTuneChatCompletionRequestAssistantMessage? Type676 { get; set; } + public global::OpenAI.CreateVectorStoreFileBatchRequest? Type676 { get; set; } /// /// /// - public global::OpenAI.FineTuneChatCompletionRequestAssistantMessageVariant1? Type677 { get; set; } + public global::OpenAI.CreateVectorStoreFileRequest? Type677 { get; set; } /// /// /// - public global::OpenAI.FineTuningIntegration? Type678 { get; set; } + public global::OpenAI.CreateVectorStoreRequest? Type678 { get; set; } /// /// /// - public global::OpenAI.FineTuningIntegrationType? Type679 { get; set; } + public global::OpenAI.VectorStoreExpirationAfter? Type679 { get; set; } /// /// /// - public global::OpenAI.FineTuningIntegrationWandb? Type680 { get; set; } + public global::OpenAI.VectorStoreExpirationAfterAnchor? Type680 { get; set; } /// /// /// - public global::OpenAI.FineTuningJob? Type681 { get; set; } + public global::OpenAI.CreateVectorStoreRequestChunkingStrategy? Type681 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobError? Type682 { get; set; } + public global::OpenAI.CreateVectorStoreRequestChunkingStrategyDiscriminator? Type682 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobHyperparameters? Type683 { get; set; } + public global::OpenAI.CreateVectorStoreRequestChunkingStrategyDiscriminatorType? Type683 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type684 { get; set; } + public global::OpenAI.DefaultProjectErrorResponse? Type684 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobHyperparametersNEpochs? Type685 { get; set; } + public global::OpenAI.DeleteAssistantResponse? 
Type685 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobObject? Type686 { get; set; } + public global::OpenAI.DeleteAssistantResponseObject? Type686 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobStatus? Type687 { get; set; } + public global::OpenAI.DeleteFileResponse? Type687 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type688 { get; set; } + public global::OpenAI.DeleteFileResponseObject? Type688 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type689 { get; set; } + public global::OpenAI.DeleteMessageResponse? Type689 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobCheckpoint? Type690 { get; set; } + public global::OpenAI.DeleteMessageResponseObject? Type690 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobCheckpointMetrics? Type691 { get; set; } + public global::OpenAI.DeleteModelResponse? Type691 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobCheckpointObject? Type692 { get; set; } + public global::OpenAI.DeleteThreadResponse? Type692 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobEvent? Type693 { get; set; } + public global::OpenAI.DeleteThreadResponseObject? Type693 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobEventLevel? Type694 { get; set; } + public global::OpenAI.DeleteVectorStoreFileResponse? Type694 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobEventObject? Type695 { get; set; } + public global::OpenAI.DeleteVectorStoreFileResponseObject? Type695 { get; set; } /// /// /// - public global::OpenAI.FinetuneChatRequestInput? Type696 { get; set; } + public global::OpenAI.DeleteVectorStoreResponse? Type696 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type697 { get; set; } + public global::OpenAI.DeleteVectorStoreResponseObject? Type697 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type698 { get; set; } + public global::OpenAI.ErrorResponse? 
Type698 { get; set; } /// /// /// - public global::OpenAI.FinetuneCompletionRequestInput? Type699 { get; set; } + public global::OpenAI.FineTuneChatCompletionRequestAssistantMessage? Type699 { get; set; } /// /// /// - public global::OpenAI.Image? Type700 { get; set; } + public global::OpenAI.FineTuneChatCompletionRequestAssistantMessageVariant1? Type700 { get; set; } /// /// /// - public global::OpenAI.ImagesResponse? Type701 { get; set; } + public global::OpenAI.FineTuneChatRequestInput? Type701 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type702 { get; set; } + public global::System.Collections.Generic.IList>? Type702 { get; set; } /// /// /// - public global::OpenAI.Invite? Type703 { get; set; } + public global::OpenAI.OneOf? Type703 { get; set; } /// /// /// - public global::OpenAI.InviteObject? Type704 { get; set; } + public global::OpenAI.FineTuneCompletionRequestInput? Type704 { get; set; } /// /// /// - public global::OpenAI.InviteRole? Type705 { get; set; } + public global::OpenAI.FineTunePreferenceRequestInput? Type705 { get; set; } /// /// /// - public global::OpenAI.InviteStatus? Type706 { get; set; } + public global::OpenAI.FineTunePreferenceRequestInputInput? Type706 { get; set; } /// /// /// - public global::OpenAI.InviteDeleteResponse? Type707 { get; set; } + public global::System.Collections.Generic.IList>? Type707 { get; set; } /// /// /// - public global::OpenAI.InviteDeleteResponseObject? Type708 { get; set; } + public global::OpenAI.OneOf? Type708 { get; set; } /// /// /// - public global::OpenAI.InviteListResponse? Type709 { get; set; } + public global::OpenAI.FineTuningIntegration? Type709 { get; set; } /// /// /// - public global::OpenAI.InviteListResponseObject? Type710 { get; set; } + public global::OpenAI.FineTuningIntegrationType? Type710 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type711 { get; set; } + public global::OpenAI.FineTuningIntegrationWandb? 
Type711 { get; set; } /// /// /// - public global::OpenAI.InviteRequest? Type712 { get; set; } + public global::OpenAI.FineTuningJob? Type712 { get; set; } /// /// /// - public global::OpenAI.InviteRequestRole? Type713 { get; set; } + public global::OpenAI.FineTuningJobError? Type713 { get; set; } /// /// /// - public global::OpenAI.ListAssistantsResponse? Type714 { get; set; } + public global::OpenAI.FineTuningJobHyperparameters? Type714 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type715 { get; set; } + public global::OpenAI.OneOf? Type715 { get; set; } /// /// /// - public global::OpenAI.ListAuditLogsResponse? Type716 { get; set; } + public global::OpenAI.FineTuningJobHyperparametersBatchSize? Type716 { get; set; } /// /// /// - public global::OpenAI.ListAuditLogsResponseObject? Type717 { get; set; } + public global::OpenAI.OneOf? Type717 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type718 { get; set; } + public global::OpenAI.FineTuningJobHyperparametersLearningRateMultiplier? Type718 { get; set; } /// /// /// - public global::OpenAI.ListBatchesResponse? Type719 { get; set; } + public global::OpenAI.OneOf? Type719 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type720 { get; set; } + public global::OpenAI.FineTuningJobHyperparametersNEpochs? Type720 { get; set; } /// /// /// - public global::OpenAI.ListBatchesResponseObject? Type721 { get; set; } + public global::OpenAI.FineTuningJobObject? Type721 { get; set; } /// /// /// - public global::OpenAI.ListFilesResponse? Type722 { get; set; } + public global::OpenAI.FineTuningJobStatus? Type722 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type723 { get; set; } + public global::System.Collections.Generic.IList>? Type723 { get; set; } /// /// /// - public global::OpenAI.OpenAIFile? Type724 { get; set; } + public global::OpenAI.OneOf? 
Type724 { get; set; } /// /// /// - public global::OpenAI.OpenAIFileObject? Type725 { get; set; } + public global::OpenAI.FineTuningJobCheckpoint? Type725 { get; set; } /// /// /// - public global::OpenAI.OpenAIFilePurpose? Type726 { get; set; } + public global::OpenAI.FineTuningJobCheckpointMetrics? Type726 { get; set; } /// /// /// - public global::OpenAI.OpenAIFileStatus? Type727 { get; set; } + public global::OpenAI.FineTuningJobCheckpointObject? Type727 { get; set; } /// /// /// - public global::OpenAI.ListFineTuningJobCheckpointsResponse? Type728 { get; set; } + public global::OpenAI.FineTuningJobEvent? Type728 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type729 { get; set; } + public global::OpenAI.FineTuningJobEventObject? Type729 { get; set; } /// /// /// - public global::OpenAI.ListFineTuningJobCheckpointsResponseObject? Type730 { get; set; } + public global::OpenAI.FineTuningJobEventLevel? Type730 { get; set; } /// /// /// - public global::OpenAI.ListFineTuningJobEventsResponse? Type731 { get; set; } + public global::OpenAI.FineTuningJobEventType? Type731 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type732 { get; set; } + public global::OpenAI.Image? Type732 { get; set; } /// /// /// - public global::OpenAI.ListFineTuningJobEventsResponseObject? Type733 { get; set; } + public global::OpenAI.ImagesResponse? Type733 { get; set; } /// /// /// - public global::OpenAI.ListMessagesResponse? Type734 { get; set; } + public global::System.Collections.Generic.IList? Type734 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type735 { get; set; } + public global::OpenAI.Invite? Type735 { get; set; } /// /// /// - public global::OpenAI.ListModelsResponse? Type736 { get; set; } + public global::OpenAI.InviteObject? Type736 { get; set; } /// /// /// - public global::OpenAI.ListModelsResponseObject? Type737 { get; set; } + public global::OpenAI.InviteRole? 
Type737 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type738 { get; set; } + public global::OpenAI.InviteStatus? Type738 { get; set; } /// /// /// - public global::OpenAI.Model15? Type739 { get; set; } + public global::OpenAI.InviteDeleteResponse? Type739 { get; set; } /// /// /// - public global::OpenAI.ModelObject? Type740 { get; set; } + public global::OpenAI.InviteDeleteResponseObject? Type740 { get; set; } /// /// /// - public global::OpenAI.ListPaginatedFineTuningJobsResponse? Type741 { get; set; } + public global::OpenAI.InviteListResponse? Type741 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type742 { get; set; } + public global::OpenAI.InviteListResponseObject? Type742 { get; set; } /// /// /// - public global::OpenAI.ListPaginatedFineTuningJobsResponseObject? Type743 { get; set; } + public global::System.Collections.Generic.IList? Type743 { get; set; } /// /// /// - public global::OpenAI.ListRunStepsResponse? Type744 { get; set; } + public global::OpenAI.InviteRequest? Type744 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type745 { get; set; } + public global::OpenAI.InviteRequestRole? Type745 { get; set; } /// /// /// - public global::OpenAI.ListRunsResponse? Type746 { get; set; } + public global::OpenAI.ListAssistantsResponse? Type746 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type747 { get; set; } + public global::System.Collections.Generic.IList? Type747 { get; set; } /// /// /// - public global::OpenAI.ListThreadsResponse? Type748 { get; set; } + public global::OpenAI.ListAuditLogsResponse? Type748 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type749 { get; set; } + public global::OpenAI.ListAuditLogsResponseObject? Type749 { get; set; } /// /// /// - public global::OpenAI.ListVectorStoreFilesResponse? Type750 { get; set; } + public global::System.Collections.Generic.IList? 
Type750 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type751 { get; set; } + public global::OpenAI.ListBatchesResponse? Type751 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObject? Type752 { get; set; } + public global::System.Collections.Generic.IList? Type752 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectObject? Type753 { get; set; } + public global::OpenAI.ListBatchesResponseObject? Type753 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectStatus? Type754 { get; set; } + public global::OpenAI.ListFilesResponse? Type754 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectLastError? Type755 { get; set; } + public global::System.Collections.Generic.IList? Type755 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectLastErrorCode? Type756 { get; set; } + public global::OpenAI.OpenAIFile? Type756 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectChunkingStrategy? Type757 { get; set; } + public global::OpenAI.OpenAIFileObject? Type757 { get; set; } /// /// /// - public global::OpenAI.StaticChunkingStrategyResponseParam? Type758 { get; set; } + public global::OpenAI.OpenAIFilePurpose? Type758 { get; set; } /// /// /// - public global::OpenAI.StaticChunkingStrategyResponseParamType? Type759 { get; set; } + public global::OpenAI.OpenAIFileStatus? Type759 { get; set; } /// /// /// - public global::OpenAI.OtherChunkingStrategyResponseParam? Type760 { get; set; } + public global::OpenAI.ListFineTuningJobCheckpointsResponse? Type760 { get; set; } /// /// /// - public global::OpenAI.OtherChunkingStrategyResponseParamType? Type761 { get; set; } + public global::System.Collections.Generic.IList? Type761 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectChunkingStrategyDiscriminator? Type762 { get; set; } + public global::OpenAI.ListFineTuningJobCheckpointsResponseObject? 
Type762 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectChunkingStrategyDiscriminatorType? Type763 { get; set; } + public global::OpenAI.ListFineTuningJobEventsResponse? Type763 { get; set; } /// /// /// - public global::OpenAI.ListVectorStoresResponse? Type764 { get; set; } + public global::System.Collections.Generic.IList? Type764 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type765 { get; set; } + public global::OpenAI.ListFineTuningJobEventsResponseObject? Type765 { get; set; } /// /// /// - public global::OpenAI.VectorStoreObject? Type766 { get; set; } + public global::OpenAI.ListMessagesResponse? Type766 { get; set; } /// /// /// - public global::OpenAI.VectorStoreObjectObject? Type767 { get; set; } + public global::System.Collections.Generic.IList? Type767 { get; set; } /// /// /// - public global::OpenAI.VectorStoreObjectFileCounts? Type768 { get; set; } + public global::OpenAI.ListModelsResponse? Type768 { get; set; } /// /// /// - public global::OpenAI.VectorStoreObjectStatus? Type769 { get; set; } + public global::OpenAI.ListModelsResponseObject? Type769 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequest? Type770 { get; set; } + public global::System.Collections.Generic.IList? Type770 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type771 { get; set; } + public global::OpenAI.Model15? Type771 { get; set; } /// /// /// - public global::OpenAI.ToolsItem7? Type772 { get; set; } + public global::OpenAI.ModelObject? Type772 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequestToolDiscriminator? Type773 { get; set; } + public global::OpenAI.ListPaginatedFineTuningJobsResponse? Type773 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequestToolDiscriminatorType? Type774 { get; set; } + public global::System.Collections.Generic.IList? 
Type774 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequestToolResources? Type775 { get; set; } + public global::OpenAI.ListPaginatedFineTuningJobsResponseObject? Type775 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequestToolResourcesCodeInterpreter? Type776 { get; set; } + public global::OpenAI.ListRunStepsResponse? Type776 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequestToolResourcesFileSearch? Type777 { get; set; } + public global::System.Collections.Generic.IList? Type777 { get; set; } /// /// /// - public global::OpenAI.ModifyMessageRequest? Type778 { get; set; } + public global::OpenAI.ListRunsResponse? Type778 { get; set; } /// /// /// - public global::OpenAI.ModifyRunRequest? Type779 { get; set; } + public global::System.Collections.Generic.IList? Type779 { get; set; } /// /// /// - public global::OpenAI.ModifyThreadRequest? Type780 { get; set; } + public global::OpenAI.ListThreadsResponse? Type780 { get; set; } /// /// /// - public global::OpenAI.ModifyThreadRequestToolResources? Type781 { get; set; } + public global::System.Collections.Generic.IList? Type781 { get; set; } /// /// /// - public global::OpenAI.ModifyThreadRequestToolResourcesCodeInterpreter? Type782 { get; set; } + public global::OpenAI.ListVectorStoreFilesResponse? Type782 { get; set; } /// /// /// - public global::OpenAI.ModifyThreadRequestToolResourcesFileSearch? Type783 { get; set; } + public global::System.Collections.Generic.IList? Type783 { get; set; } /// /// /// - public global::OpenAI.Project? Type784 { get; set; } + public global::OpenAI.VectorStoreFileObject? Type784 { get; set; } /// /// /// - public global::OpenAI.ProjectObject? Type785 { get; set; } + public global::OpenAI.VectorStoreFileObjectObject? Type785 { get; set; } /// /// /// - public global::OpenAI.ProjectStatus? Type786 { get; set; } + public global::OpenAI.VectorStoreFileObjectStatus? 
Type786 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKey? Type787 { get; set; } + public global::OpenAI.VectorStoreFileObjectLastError? Type787 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyObject? Type788 { get; set; } + public global::OpenAI.VectorStoreFileObjectLastErrorCode? Type788 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyOwner? Type789 { get; set; } + public global::OpenAI.VectorStoreFileObjectChunkingStrategy? Type789 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyOwnerType? Type790 { get; set; } + public global::OpenAI.StaticChunkingStrategyResponseParam? Type790 { get; set; } /// /// /// - public global::OpenAI.ProjectUser? Type791 { get; set; } + public global::OpenAI.StaticChunkingStrategyResponseParamType? Type791 { get; set; } /// /// /// - public global::OpenAI.ProjectUserObject? Type792 { get; set; } + public global::OpenAI.OtherChunkingStrategyResponseParam? Type792 { get; set; } /// /// /// - public global::OpenAI.ProjectUserRole? Type793 { get; set; } + public global::OpenAI.OtherChunkingStrategyResponseParamType? Type793 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccount? Type794 { get; set; } + public global::OpenAI.VectorStoreFileObjectChunkingStrategyDiscriminator? Type794 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountObject? Type795 { get; set; } + public global::OpenAI.VectorStoreFileObjectChunkingStrategyDiscriminatorType? Type795 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountRole? Type796 { get; set; } + public global::OpenAI.ListVectorStoresResponse? Type796 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyDeleteResponse? Type797 { get; set; } + public global::System.Collections.Generic.IList? Type797 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyDeleteResponseObject? Type798 { get; set; } + public global::OpenAI.VectorStoreObject? 
Type798 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyListResponse? Type799 { get; set; } + public global::OpenAI.VectorStoreObjectObject? Type799 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyListResponseObject? Type800 { get; set; } + public global::OpenAI.VectorStoreObjectFileCounts? Type800 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type801 { get; set; } + public global::OpenAI.VectorStoreObjectStatus? Type801 { get; set; } /// /// /// - public global::OpenAI.ProjectCreateRequest? Type802 { get; set; } + public global::OpenAI.ModifyAssistantRequest? Type802 { get; set; } /// /// /// - public global::OpenAI.ProjectListResponse? Type803 { get; set; } + public global::System.Collections.Generic.IList? Type803 { get; set; } /// /// /// - public global::OpenAI.ProjectListResponseObject? Type804 { get; set; } + public global::OpenAI.ToolsItem7? Type804 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type805 { get; set; } + public global::OpenAI.ModifyAssistantRequestToolDiscriminator? Type805 { get; set; } /// /// /// - public global::OpenAI.ProjectRateLimit? Type806 { get; set; } + public global::OpenAI.ModifyAssistantRequestToolDiscriminatorType? Type806 { get; set; } /// /// /// - public global::OpenAI.ProjectRateLimitObject? Type807 { get; set; } + public global::OpenAI.ModifyAssistantRequestToolResources? Type807 { get; set; } /// /// /// - public global::OpenAI.ProjectRateLimitListResponse? Type808 { get; set; } + public global::OpenAI.ModifyAssistantRequestToolResourcesCodeInterpreter? Type808 { get; set; } /// /// /// - public global::OpenAI.ProjectRateLimitListResponseObject? Type809 { get; set; } + public global::OpenAI.ModifyAssistantRequestToolResourcesFileSearch? Type809 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type810 { get; set; } + public global::OpenAI.ModifyMessageRequest? 
Type810 { get; set; } /// /// /// - public global::OpenAI.ProjectRateLimitUpdateRequest? Type811 { get; set; } + public global::OpenAI.ModifyRunRequest? Type811 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountApiKey? Type812 { get; set; } + public global::OpenAI.ModifyThreadRequest? Type812 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountApiKeyObject? Type813 { get; set; } + public global::OpenAI.ModifyThreadRequestToolResources? Type813 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountCreateRequest? Type814 { get; set; } + public global::OpenAI.ModifyThreadRequestToolResourcesCodeInterpreter? Type814 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountCreateResponse? Type815 { get; set; } + public global::OpenAI.ModifyThreadRequestToolResourcesFileSearch? Type815 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountCreateResponseObject? Type816 { get; set; } + public global::OpenAI.Project? Type816 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountCreateResponseRole? Type817 { get; set; } + public global::OpenAI.ProjectObject? Type817 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountDeleteResponse? Type818 { get; set; } + public global::OpenAI.ProjectStatus? Type818 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountDeleteResponseObject? Type819 { get; set; } + public global::OpenAI.ProjectApiKey? Type819 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountListResponse? Type820 { get; set; } + public global::OpenAI.ProjectApiKeyObject? Type820 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountListResponseObject? Type821 { get; set; } + public global::OpenAI.ProjectApiKeyOwner? Type821 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type822 { get; set; } + public global::OpenAI.ProjectApiKeyOwnerType? 
Type822 { get; set; } /// /// /// - public global::OpenAI.ProjectUpdateRequest? Type823 { get; set; } + public global::OpenAI.ProjectUser? Type823 { get; set; } /// /// /// - public global::OpenAI.ProjectUserCreateRequest? Type824 { get; set; } + public global::OpenAI.ProjectUserObject? Type824 { get; set; } /// /// /// - public global::OpenAI.ProjectUserCreateRequestRole? Type825 { get; set; } + public global::OpenAI.ProjectUserRole? Type825 { get; set; } /// /// /// - public global::OpenAI.ProjectUserDeleteResponse? Type826 { get; set; } + public global::OpenAI.ProjectServiceAccount? Type826 { get; set; } /// /// /// - public global::OpenAI.ProjectUserDeleteResponseObject? Type827 { get; set; } + public global::OpenAI.ProjectServiceAccountObject? Type827 { get; set; } /// /// /// - public global::OpenAI.ProjectUserListResponse? Type828 { get; set; } + public global::OpenAI.ProjectServiceAccountRole? Type828 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type829 { get; set; } + public global::OpenAI.ProjectApiKeyDeleteResponse? Type829 { get; set; } /// /// /// - public global::OpenAI.ProjectUserUpdateRequest? Type830 { get; set; } + public global::OpenAI.ProjectApiKeyDeleteResponseObject? Type830 { get; set; } /// /// /// - public global::OpenAI.ProjectUserUpdateRequestRole? Type831 { get; set; } + public global::OpenAI.ProjectApiKeyListResponse? Type831 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventConversationItemCreate? Type832 { get; set; } + public global::OpenAI.ProjectApiKeyListResponseObject? Type832 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventConversationItemCreateType? Type833 { get; set; } + public global::System.Collections.Generic.IList? Type833 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItem? Type834 { get; set; } + public global::OpenAI.ProjectCreateRequest? 
Type834 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemObject? Type835 { get; set; } + public global::OpenAI.ProjectListResponse? Type835 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemType? Type836 { get; set; } + public global::OpenAI.ProjectListResponseObject? Type836 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemStatus? Type837 { get; set; } + public global::System.Collections.Generic.IList? Type837 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemRole? Type838 { get; set; } + public global::OpenAI.ProjectRateLimit? Type838 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type839 { get; set; } + public global::OpenAI.ProjectRateLimitObject? Type839 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemContentItem? Type840 { get; set; } + public global::OpenAI.ProjectRateLimitListResponse? Type840 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemContentItemType? Type841 { get; set; } + public global::OpenAI.ProjectRateLimitListResponseObject? Type841 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventConversationItemDelete? Type842 { get; set; } + public global::System.Collections.Generic.IList? Type842 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventConversationItemDeleteType? Type843 { get; set; } + public global::OpenAI.ProjectRateLimitUpdateRequest? Type843 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventConversationItemTruncate? Type844 { get; set; } + public global::OpenAI.ProjectServiceAccountApiKey? Type844 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventConversationItemTruncateType? Type845 { get; set; } + public global::OpenAI.ProjectServiceAccountApiKeyObject? Type845 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventInputAudioBufferAppend? 
Type846 { get; set; } + public global::OpenAI.ProjectServiceAccountCreateRequest? Type846 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventInputAudioBufferAppendType? Type847 { get; set; } + public global::OpenAI.ProjectServiceAccountCreateResponse? Type847 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventInputAudioBufferClear? Type848 { get; set; } + public global::OpenAI.ProjectServiceAccountCreateResponseObject? Type848 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventInputAudioBufferClearType? Type849 { get; set; } + public global::OpenAI.ProjectServiceAccountCreateResponseRole? Type849 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventInputAudioBufferCommit? Type850 { get; set; } + public global::OpenAI.ProjectServiceAccountDeleteResponse? Type850 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventInputAudioBufferCommitType? Type851 { get; set; } + public global::OpenAI.ProjectServiceAccountDeleteResponseObject? Type851 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventResponseCancel? Type852 { get; set; } + public global::OpenAI.ProjectServiceAccountListResponse? Type852 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventResponseCancelType? Type853 { get; set; } + public global::OpenAI.ProjectServiceAccountListResponseObject? Type853 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventResponseCreate? Type854 { get; set; } + public global::System.Collections.Generic.IList? Type854 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventResponseCreateType? Type855 { get; set; } + public global::OpenAI.ProjectUpdateRequest? Type855 { get; set; } /// /// /// - public global::OpenAI.RealtimeSession? Type856 { get; set; } + public global::OpenAI.ProjectUserCreateRequest? Type856 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionObject? 
Type857 { get; set; } + public global::OpenAI.ProjectUserCreateRequestRole? Type857 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionVoice? Type858 { get; set; } + public global::OpenAI.ProjectUserDeleteResponse? Type858 { get; set; } /// /// /// - public global::OpenAI.RealtimeAudioFormat? Type859 { get; set; } + public global::OpenAI.ProjectUserDeleteResponseObject? Type859 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionInputAudioTranscription? Type860 { get; set; } + public global::OpenAI.ProjectUserListResponse? Type860 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionTurnDetection? Type861 { get; set; } + public global::System.Collections.Generic.IList? Type861 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionTurnDetectionType? Type862 { get; set; } + public global::OpenAI.ProjectUserUpdateRequest? Type862 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type863 { get; set; } + public global::OpenAI.ProjectUserUpdateRequestRole? Type863 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionTool? Type864 { get; set; } + public global::OpenAI.RealtimeClientEventConversationItemCreate? Type864 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionToolChoice? Type865 { get; set; } + public global::OpenAI.RealtimeClientEventConversationItemCreateType? Type865 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type866 { get; set; } + public global::OpenAI.RealtimeConversationItem? Type866 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionMaxOutputTokens? Type867 { get; set; } + public global::OpenAI.RealtimeConversationItemObject? Type867 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventSessionUpdate? Type868 { get; set; } + public global::OpenAI.RealtimeConversationItemType? Type868 { get; set; } /// /// /// - public global::OpenAI.RealtimeClientEventSessionUpdateType? 
Type869 { get; set; } + public global::OpenAI.RealtimeConversationItemStatus? Type869 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponse? Type870 { get; set; } + public global::OpenAI.RealtimeConversationItemRole? Type870 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseObject? Type871 { get; set; } + public global::System.Collections.Generic.IList? Type871 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseStatus? Type872 { get; set; } + public global::OpenAI.RealtimeConversationItemContentItem? Type872 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type873 { get; set; } + public global::OpenAI.RealtimeConversationItemContentItemType? Type873 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseUsage? Type874 { get; set; } + public global::OpenAI.RealtimeClientEventConversationItemDelete? Type874 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationCreated? Type875 { get; set; } + public global::OpenAI.RealtimeClientEventConversationItemDeleteType? Type875 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationCreatedType? Type876 { get; set; } + public global::OpenAI.RealtimeClientEventConversationItemTruncate? Type876 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationCreatedConversation? Type877 { get; set; } + public global::OpenAI.RealtimeClientEventConversationItemTruncateType? Type877 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemCreated? Type878 { get; set; } + public global::OpenAI.RealtimeClientEventInputAudioBufferAppend? Type878 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemCreatedType? Type879 { get; set; } + public global::OpenAI.RealtimeClientEventInputAudioBufferAppendType? Type879 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemDeleted? 
Type880 { get; set; } + public global::OpenAI.RealtimeClientEventInputAudioBufferClear? Type880 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemDeletedType? Type881 { get; set; } + public global::OpenAI.RealtimeClientEventInputAudioBufferClearType? Type881 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted? Type882 { get; set; } + public global::OpenAI.RealtimeClientEventInputAudioBufferCommit? Type882 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType? Type883 { get; set; } + public global::OpenAI.RealtimeClientEventInputAudioBufferCommitType? Type883 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailed? Type884 { get; set; } + public global::OpenAI.RealtimeClientEventResponseCancel? Type884 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType? Type885 { get; set; } + public global::OpenAI.RealtimeClientEventResponseCancelType? Type885 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError? Type886 { get; set; } + public global::OpenAI.RealtimeClientEventResponseCreate? Type886 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemTruncated? Type887 { get; set; } + public global::OpenAI.RealtimeClientEventResponseCreateType? Type887 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventConversationItemTruncatedType? Type888 { get; set; } + public global::OpenAI.RealtimeResponseCreateParams? Type888 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventError? Type889 { get; set; } + public global::System.Collections.Generic.IList? Type889 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventErrorType? 
Type890 { get; set; } + public global::OpenAI.RealtimeResponseCreateParamsModalitie? Type890 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventErrorError? Type891 { get; set; } + public global::OpenAI.RealtimeResponseCreateParamsVoice? Type891 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventInputAudioBufferCleared? Type892 { get; set; } + public global::OpenAI.RealtimeResponseCreateParamsOutputAudioFormat? Type892 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventInputAudioBufferClearedType? Type893 { get; set; } + public global::System.Collections.Generic.IList? Type893 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventInputAudioBufferCommitted? Type894 { get; set; } + public global::OpenAI.RealtimeResponseCreateParamsTool? Type894 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventInputAudioBufferCommittedType? Type895 { get; set; } + public global::OpenAI.RealtimeResponseCreateParamsToolType? Type895 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStarted? Type896 { get; set; } + public global::OpenAI.OneOf? Type896 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStartedType? Type897 { get; set; } + public global::OpenAI.RealtimeResponseCreateParamsMaxResponseOutputTokens? Type897 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStopped? Type898 { get; set; } + public global::OpenAI.OneOf? Type898 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStoppedType? Type899 { get; set; } + public global::OpenAI.RealtimeResponseCreateParamsConversation? Type899 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventRateLimitsUpdated? Type900 { get; set; } + public global::System.Collections.Generic.IList? 
Type900 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventRateLimitsUpdatedType? Type901 { get; set; } + public global::OpenAI.RealtimeClientEventSessionUpdate? Type901 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type902 { get; set; } + public global::OpenAI.RealtimeClientEventSessionUpdateType? Type902 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimit? Type903 { get; set; } + public global::OpenAI.RealtimeSessionCreateRequest? Type903 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseAudioDelta? Type904 { get; set; } + public global::System.Collections.Generic.IList? Type904 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseAudioDeltaType? Type905 { get; set; } + public global::OpenAI.RealtimeSessionCreateRequestModalitie? Type905 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseAudioDone? Type906 { get; set; } + public global::OpenAI.RealtimeSessionCreateRequestModel? Type906 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseAudioDoneType? Type907 { get; set; } + public global::OpenAI.RealtimeSessionCreateRequestVoice? Type907 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseAudioTranscriptDelta? Type908 { get; set; } + public global::OpenAI.RealtimeSessionCreateRequestInputAudioFormat? Type908 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseAudioTranscriptDeltaType? Type909 { get; set; } + public global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormat? Type909 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseAudioTranscriptDone? Type910 { get; set; } + public global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription? Type910 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseAudioTranscriptDoneType? 
Type911 { get; set; } + public global::OpenAI.RealtimeSessionCreateRequestTurnDetection? Type911 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseContentPartAdded? Type912 { get; set; } + public global::System.Collections.Generic.IList? Type912 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseContentPartAddedType? Type913 { get; set; } + public global::OpenAI.RealtimeSessionCreateRequestTool? Type913 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseContentPartAddedPart? Type914 { get; set; } + public global::OpenAI.RealtimeSessionCreateRequestToolType? Type914 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseContentPartAddedPartType? Type915 { get; set; } + public global::OpenAI.OneOf? Type915 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseContentPartDone? Type916 { get; set; } + public global::OpenAI.RealtimeSessionCreateRequestMaxResponseOutputTokens? Type916 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseContentPartDoneType? Type917 { get; set; } + public global::OpenAI.RealtimeResponse? Type917 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseContentPartDonePart? Type918 { get; set; } + public global::OpenAI.RealtimeResponseObject? Type918 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseCreated? Type919 { get; set; } + public global::OpenAI.RealtimeResponseStatus? Type919 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseCreatedType? Type920 { get; set; } + public global::OpenAI.RealtimeResponseUsage? Type920 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseDone? Type921 { get; set; } + public global::OpenAI.RealtimeServerEventConversationCreated? Type921 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseDoneType? 
Type922 { get; set; } + public global::OpenAI.RealtimeServerEventConversationCreatedType? Type922 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDelta? Type923 { get; set; } + public global::OpenAI.RealtimeServerEventConversationCreatedConversation? Type923 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDeltaType? Type924 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemCreated? Type924 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDone? Type925 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemCreatedType? Type925 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDoneType? Type926 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemDeleted? Type926 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseOutputItemAdded? Type927 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemDeletedType? Type927 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseOutputItemAddedType? Type928 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted? Type928 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseOutputItemDone? Type929 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType? Type929 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseOutputItemDoneType? Type930 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailed? Type930 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseTextDelta? Type931 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType? 
Type931 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseTextDeltaType? Type932 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError? Type932 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseTextDone? Type933 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemTruncated? Type933 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventResponseTextDoneType? Type934 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemTruncatedType? Type934 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionCreated? Type935 { get; set; } + public global::OpenAI.RealtimeServerEventError? Type935 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionCreatedType? Type936 { get; set; } + public global::OpenAI.RealtimeServerEventErrorType? Type936 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionUpdated? Type937 { get; set; } + public global::OpenAI.RealtimeServerEventErrorError? Type937 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventSessionUpdatedType? Type938 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferCleared? Type938 { get; set; } /// /// /// - public global::OpenAI.SubmitToolOutputsRunRequest? Type939 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferClearedType? Type939 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type940 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferCommitted? Type940 { get; set; } /// /// /// - public global::OpenAI.SubmitToolOutputsRunRequestToolOutput? Type941 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferCommittedType? Type941 { get; set; } /// /// /// - public global::OpenAI.UpdateVectorStoreRequest? 
Type942 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStarted? Type942 { get; set; } /// /// /// - public global::OpenAI.Upload? Type943 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStartedType? Type943 { get; set; } /// /// /// - public global::OpenAI.UploadStatus? Type944 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStopped? Type944 { get; set; } /// /// /// - public global::OpenAI.UploadObject? Type945 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStoppedType? Type945 { get; set; } /// /// /// - public global::OpenAI.UploadPart? Type946 { get; set; } + public global::OpenAI.RealtimeServerEventRateLimitsUpdated? Type946 { get; set; } /// /// /// - public global::OpenAI.UploadPartObject? Type947 { get; set; } + public global::OpenAI.RealtimeServerEventRateLimitsUpdatedType? Type947 { get; set; } /// /// /// - public global::OpenAI.UsageAudioSpeechesResult? Type948 { get; set; } + public global::System.Collections.Generic.IList? Type948 { get; set; } /// /// /// - public global::OpenAI.UsageAudioSpeechesResultObject? Type949 { get; set; } + public global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimit? Type949 { get; set; } /// /// /// - public global::OpenAI.UsageAudioTranscriptionsResult? Type950 { get; set; } + public global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimitName? Type950 { get; set; } /// /// /// - public global::OpenAI.UsageAudioTranscriptionsResultObject? Type951 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioDelta? Type951 { get; set; } /// /// /// - public global::OpenAI.UsageCodeInterpreterSessionsResult? Type952 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioDeltaType? Type952 { get; set; } /// /// /// - public global::OpenAI.UsageCodeInterpreterSessionsResultObject? Type953 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioDone? 
Type953 { get; set; } /// /// /// - public global::OpenAI.UsageCompletionsResult? Type954 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioDoneType? Type954 { get; set; } /// /// /// - public global::OpenAI.UsageCompletionsResultObject? Type955 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioTranscriptDelta? Type955 { get; set; } /// /// /// - public global::OpenAI.UsageEmbeddingsResult? Type956 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioTranscriptDeltaType? Type956 { get; set; } /// /// /// - public global::OpenAI.UsageEmbeddingsResultObject? Type957 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioTranscriptDone? Type957 { get; set; } /// /// /// - public global::OpenAI.UsageImagesResult? Type958 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioTranscriptDoneType? Type958 { get; set; } /// /// /// - public global::OpenAI.UsageImagesResultObject? Type959 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartAdded? Type959 { get; set; } /// /// /// - public global::OpenAI.UsageModerationsResult? Type960 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartAddedType? Type960 { get; set; } /// /// /// - public global::OpenAI.UsageModerationsResultObject? Type961 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartAddedPart? Type961 { get; set; } /// /// /// - public global::OpenAI.UsageResponse? Type962 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartAddedPartType? Type962 { get; set; } /// /// /// - public global::OpenAI.UsageResponseObject? Type963 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartDone? Type963 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type964 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartDoneType? 
Type964 { get; set; } /// /// /// - public global::OpenAI.UsageTimeBucket? Type965 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartDonePart? Type965 { get; set; } /// /// /// - public global::OpenAI.UsageTimeBucketObject? Type966 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartDonePartType? Type966 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type967 { get; set; } + public global::OpenAI.RealtimeServerEventResponseCreated? Type967 { get; set; } /// /// /// - public global::OpenAI.ResultItem? Type968 { get; set; } + public global::OpenAI.RealtimeServerEventResponseCreatedType? Type968 { get; set; } /// /// /// - public global::OpenAI.UsageVectorStoresResult? Type969 { get; set; } + public global::OpenAI.RealtimeServerEventResponseDone? Type969 { get; set; } /// /// /// - public global::OpenAI.UsageVectorStoresResultObject? Type970 { get; set; } + public global::OpenAI.RealtimeServerEventResponseDoneType? Type970 { get; set; } /// /// /// - public global::OpenAI.UsageTimeBucketResultItemDiscriminator? Type971 { get; set; } + public global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDelta? Type971 { get; set; } /// /// /// - public global::OpenAI.UsageTimeBucketResultItemDiscriminatorObject? Type972 { get; set; } + public global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDeltaType? Type972 { get; set; } /// /// /// - public global::OpenAI.User? Type973 { get; set; } + public global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDone? Type973 { get; set; } /// /// /// - public global::OpenAI.UserObject? Type974 { get; set; } + public global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDoneType? Type974 { get; set; } /// /// /// - public global::OpenAI.UserRole? Type975 { get; set; } + public global::OpenAI.RealtimeServerEventResponseOutputItemAdded? Type975 { get; set; } /// /// /// - public global::OpenAI.UserDeleteResponse? 
Type976 { get; set; } + public global::OpenAI.RealtimeServerEventResponseOutputItemAddedType? Type976 { get; set; } /// /// /// - public global::OpenAI.UserDeleteResponseObject? Type977 { get; set; } + public global::OpenAI.RealtimeServerEventResponseOutputItemDone? Type977 { get; set; } /// /// /// - public global::OpenAI.UserListResponse? Type978 { get; set; } + public global::OpenAI.RealtimeServerEventResponseOutputItemDoneType? Type978 { get; set; } /// /// /// - public global::OpenAI.UserListResponseObject? Type979 { get; set; } + public global::OpenAI.RealtimeServerEventResponseTextDelta? Type979 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type980 { get; set; } + public global::OpenAI.RealtimeServerEventResponseTextDeltaType? Type980 { get; set; } /// /// /// - public global::OpenAI.UserRoleUpdateRequest? Type981 { get; set; } + public global::OpenAI.RealtimeServerEventResponseTextDone? Type981 { get; set; } /// /// /// - public global::OpenAI.UserRoleUpdateRequestRole? Type982 { get; set; } + public global::OpenAI.RealtimeServerEventResponseTextDoneType? Type982 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileBatchObject? Type983 { get; set; } + public global::OpenAI.RealtimeServerEventSessionCreated? Type983 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileBatchObjectObject? Type984 { get; set; } + public global::OpenAI.RealtimeServerEventSessionCreatedType? Type984 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileBatchObjectStatus? Type985 { get; set; } + public global::OpenAI.RealtimeSession? Type985 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileBatchObjectFileCounts? Type986 { get; set; } + public global::OpenAI.RealtimeSessionObject? Type986 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventType? Type987 { get; set; } + public global::OpenAI.RealtimeSessionVoice? 
Type987 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventBase? Type988 { get; set; } + public global::OpenAI.RealtimeAudioFormat? Type988 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversation? Type989 { get; set; } + public global::OpenAI.RealtimeSessionInputAudioTranscription? Type989 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationObject? Type990 { get; set; } + public global::OpenAI.RealtimeSessionTurnDetection? Type990 { get; set; } /// /// /// - public global::OpenAI.RealtimeContentPart? Type991 { get; set; } + public global::OpenAI.RealtimeSessionTurnDetectionType? Type991 { get; set; } /// /// /// - public global::OpenAI.RealtimeContentPartType? Type992 { get; set; } + public global::System.Collections.Generic.IList? Type992 { get; set; } /// /// /// - public global::OpenAI.RealtimeErrorDetails? Type993 { get; set; } + public global::OpenAI.RealtimeSessionTool? Type993 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionUpdate? Type994 { get; set; } + public global::OpenAI.RealtimeSessionToolChoice? Type994 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionUpdateType? Type995 { get; set; } + public global::OpenAI.OneOf? Type995 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferAppend? Type996 { get; set; } + public global::OpenAI.RealtimeSessionMaxOutputTokens? Type996 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferAppendType? Type997 { get; set; } + public global::OpenAI.RealtimeServerEventSessionUpdated? Type997 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCommit? Type998 { get; set; } + public global::OpenAI.RealtimeServerEventSessionUpdatedType? Type998 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCommitType? Type999 { get; set; } + public global::OpenAI.RealtimeSessionCreateResponse? 
Type999 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferClear? Type1000 { get; set; } + public global::OpenAI.RealtimeSessionCreateResponseClientSecret? Type1000 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferClearType? Type1001 { get; set; } + public global::System.Collections.Generic.IList? Type1001 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemCreate? Type1002 { get; set; } + public global::OpenAI.RealtimeSessionCreateResponseModalitie? Type1002 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemCreateType? Type1003 { get; set; } + public global::OpenAI.RealtimeSessionCreateResponseVoice? Type1003 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemTruncate? Type1004 { get; set; } + public global::OpenAI.RealtimeSessionCreateResponseInputAudioTranscription? Type1004 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemTruncateType? Type1005 { get; set; } + public global::OpenAI.RealtimeSessionCreateResponseTurnDetection? Type1005 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemDelete? Type1006 { get; set; } + public global::System.Collections.Generic.IList? Type1006 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemDeleteType? Type1007 { get; set; } + public global::OpenAI.RealtimeSessionCreateResponseTool? Type1007 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreate? Type1008 { get; set; } + public global::OpenAI.RealtimeSessionCreateResponseToolType? Type1008 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateType? Type1009 { get; set; } + public global::OpenAI.OneOf? Type1009 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponse? Type1010 { get; set; } + public global::OpenAI.RealtimeSessionCreateResponseMaxResponseOutputTokens? 
Type1010 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1011 { get; set; } + public global::OpenAI.SubmitToolOutputsRunRequest? Type1011 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseModalitie? Type1012 { get; set; } + public global::System.Collections.Generic.IList? Type1012 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseVoice? Type1013 { get; set; } + public global::OpenAI.SubmitToolOutputsRunRequestToolOutput? Type1013 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1014 { get; set; } + public global::OpenAI.UpdateVectorStoreRequest? Type1014 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseTool? Type1015 { get; set; } + public global::OpenAI.Upload? Type1015 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type1016 { get; set; } + public global::OpenAI.UploadStatus? Type1016 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseToolChoice? Type1017 { get; set; } + public global::OpenAI.UploadObject? Type1017 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type1018 { get; set; } + public global::OpenAI.UploadPart? Type1018 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseMaxOutputTokens? Type1019 { get; set; } + public global::OpenAI.UploadPartObject? Type1019 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCancel? Type1020 { get; set; } + public global::OpenAI.UsageAudioSpeechesResult? Type1020 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCancelType? Type1021 { get; set; } + public global::OpenAI.UsageAudioSpeechesResultObject? Type1021 { get; set; } /// /// /// - public global::OpenAI.RealtimeError? Type1022 { get; set; } + public global::OpenAI.UsageAudioTranscriptionsResult? Type1022 { get; set; } /// /// /// - public global::OpenAI.RealtimeErrorType? 
Type1023 { get; set; } + public global::OpenAI.UsageAudioTranscriptionsResultObject? Type1023 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionCreated? Type1024 { get; set; } + public global::OpenAI.UsageCodeInterpreterSessionsResult? Type1024 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionCreatedType? Type1025 { get; set; } + public global::OpenAI.UsageCodeInterpreterSessionsResultObject? Type1025 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionUpdated? Type1026 { get; set; } + public global::OpenAI.UsageCompletionsResult? Type1026 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionUpdatedType? Type1027 { get; set; } + public global::OpenAI.UsageCompletionsResultObject? Type1027 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationCreated? Type1028 { get; set; } + public global::OpenAI.UsageEmbeddingsResult? Type1028 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationCreatedType? Type1029 { get; set; } + public global::OpenAI.UsageEmbeddingsResultObject? Type1029 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemCreated? Type1030 { get; set; } + public global::OpenAI.UsageImagesResult? Type1030 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemCreatedType? Type1031 { get; set; } + public global::OpenAI.UsageImagesResultObject? Type1031 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionCompleted? Type1032 { get; set; } + public global::OpenAI.UsageModerationsResult? Type1032 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionCompletedType? Type1033 { get; set; } + public global::OpenAI.UsageModerationsResultObject? Type1033 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionFailed? Type1034 { get; set; } + public global::OpenAI.UsageResponse? 
Type1034 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionFailedType? Type1035 { get; set; } + public global::OpenAI.UsageResponseObject? Type1035 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemTruncated? Type1036 { get; set; } + public global::System.Collections.Generic.IList? Type1036 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemTruncatedType? Type1037 { get; set; } + public global::OpenAI.UsageTimeBucket? Type1037 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemDeleted? Type1038 { get; set; } + public global::OpenAI.UsageTimeBucketObject? Type1038 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemDeletedType? Type1039 { get; set; } + public global::System.Collections.Generic.IList? Type1039 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCommitted? Type1040 { get; set; } + public global::OpenAI.ResultItem? Type1040 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCommittedType? Type1041 { get; set; } + public global::OpenAI.UsageVectorStoresResult? Type1041 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCleared? Type1042 { get; set; } + public global::OpenAI.UsageVectorStoresResultObject? Type1042 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferClearedType? Type1043 { get; set; } + public global::OpenAI.UsageTimeBucketResultItemDiscriminator? Type1043 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferSpeechStarted? Type1044 { get; set; } + public global::OpenAI.UsageTimeBucketResultItemDiscriminatorObject? Type1044 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferSpeechStartedType? Type1045 { get; set; } + public global::OpenAI.User? Type1045 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferSpeechStopped? 
Type1046 { get; set; } + public global::OpenAI.UserObject? Type1046 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferSpeechStoppedType? Type1047 { get; set; } + public global::OpenAI.UserRole? Type1047 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreated? Type1048 { get; set; } + public global::OpenAI.UserDeleteResponse? Type1048 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreatedType? Type1049 { get; set; } + public global::OpenAI.UserDeleteResponseObject? Type1049 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseDone? Type1050 { get; set; } + public global::OpenAI.UserListResponse? Type1050 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseDoneType? Type1051 { get; set; } + public global::OpenAI.UserListResponseObject? Type1051 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseOutputItemAdded? Type1052 { get; set; } + public global::System.Collections.Generic.IList? Type1052 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseOutputItemAddedType? Type1053 { get; set; } + public global::OpenAI.UserRoleUpdateRequest? Type1053 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseOutputItemDone? Type1054 { get; set; } + public global::OpenAI.UserRoleUpdateRequestRole? Type1054 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseOutputItemDoneType? Type1055 { get; set; } + public global::OpenAI.VectorStoreFileBatchObject? Type1055 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseContentPartAdded? Type1056 { get; set; } + public global::OpenAI.VectorStoreFileBatchObjectObject? Type1056 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseContentPartAddedType? Type1057 { get; set; } + public global::OpenAI.VectorStoreFileBatchObjectStatus? Type1057 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseContentPartDone? 
Type1058 { get; set; } + public global::OpenAI.VectorStoreFileBatchObjectFileCounts? Type1058 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseContentPartDoneType? Type1059 { get; set; } + public global::OpenAI.RealtimeServerEventType? Type1059 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseTextDelta? Type1060 { get; set; } + public global::OpenAI.RealtimeServerEventBase? Type1060 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseTextDeltaType? Type1061 { get; set; } + public global::OpenAI.RealtimeConversation? Type1061 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseTextDone? Type1062 { get; set; } + public global::OpenAI.RealtimeConversationObject? Type1062 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseTextDoneType? Type1063 { get; set; } + public global::OpenAI.RealtimeContentPart? Type1063 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioTranscriptDelta? Type1064 { get; set; } + public global::OpenAI.RealtimeContentPartType? Type1064 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioTranscriptDeltaType? Type1065 { get; set; } + public global::OpenAI.RealtimeErrorDetails? Type1065 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioTranscriptDone? Type1066 { get; set; } + public global::OpenAI.RealtimeSessionUpdate? Type1066 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioTranscriptDoneType? Type1067 { get; set; } + public global::OpenAI.RealtimeSessionUpdateType? Type1067 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioDelta? Type1068 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferAppend? Type1068 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioDeltaType? Type1069 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferAppendType? Type1069 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioDone? 
Type1070 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferCommit? Type1070 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioDoneType? Type1071 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferCommitType? Type1071 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseFunctionCallArgumentsDelta? Type1072 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferClear? Type1072 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseFunctionCallArgumentsDeltaType? Type1073 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferClearType? Type1073 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseFunctionCallArgumentsDone? Type1074 { get; set; } + public global::OpenAI.RealtimeConversationItemCreate? Type1074 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseFunctionCallArgumentsDoneType? Type1075 { get; set; } + public global::OpenAI.RealtimeConversationItemCreateType? Type1075 { get; set; } /// /// /// - public global::OpenAI.RealtimeRateLimitsUpdated? Type1076 { get; set; } + public global::OpenAI.RealtimeConversationItemTruncate? Type1076 { get; set; } /// /// /// - public global::OpenAI.RealtimeRateLimitsUpdatedType? Type1077 { get; set; } + public global::OpenAI.RealtimeConversationItemTruncateType? Type1077 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1078 { get; set; } + public global::OpenAI.RealtimeConversationItemDelete? Type1078 { get; set; } /// /// /// - public global::OpenAI.RealtimeRateLimitsUpdatedRateLimit? Type1079 { get; set; } + public global::OpenAI.RealtimeConversationItemDeleteType? Type1079 { get; set; } /// /// /// - public global::OpenAI.RealtimeRateLimitsUpdatedRateLimitName? Type1080 { get; set; } + public global::OpenAI.RealtimeResponseCreate? Type1080 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEvent? 
Type1081 { get; set; } + public global::OpenAI.RealtimeResponseCreateType? Type1081 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventDiscriminator? Type1082 { get; set; } + public global::OpenAI.RealtimeResponseCreateResponse? Type1082 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventDiscriminatorType? Type1083 { get; set; } + public global::System.Collections.Generic.IList? Type1083 { get; set; } /// /// /// - public global::OpenAI.CreateBatchRequest? Type1084 { get; set; } + public global::OpenAI.RealtimeResponseCreateResponseModalitie? Type1084 { get; set; } /// /// /// - public global::OpenAI.CreateBatchRequestEndpoint? Type1085 { get; set; } + public global::OpenAI.RealtimeResponseCreateResponseVoice? Type1085 { get; set; } /// /// /// - public global::OpenAI.CreateBatchRequestCompletionWindow? Type1086 { get; set; } + public global::System.Collections.Generic.IList? Type1086 { get; set; } /// /// /// - public global::OpenAI.ListAssistantsOrder? Type1087 { get; set; } + public global::OpenAI.RealtimeResponseCreateResponseTool? Type1087 { get; set; } /// /// /// - public global::OpenAI.ListFilesOrder? Type1088 { get; set; } + public global::OpenAI.OneOf? Type1088 { get; set; } /// /// /// - public global::OpenAI.ListAuditLogsEffectiveAt? Type1089 { get; set; } + public global::OpenAI.RealtimeResponseCreateResponseToolChoice? Type1089 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1090 { get; set; } + public global::OpenAI.OneOf? Type1090 { get; set; } /// /// /// - public global::OpenAI.UsageCostsBucketWidth? Type1091 { get; set; } + public global::OpenAI.RealtimeResponseCreateResponseMaxOutputTokens? Type1091 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1092 { get; set; } + public global::OpenAI.RealtimeResponseCancel? Type1092 { get; set; } /// /// /// - public global::OpenAI.UsageCostsGroupByItem? 
Type1093 { get; set; } + public global::OpenAI.RealtimeResponseCancelType? Type1093 { get; set; } /// /// /// - public global::OpenAI.UsageAudioSpeechesBucketWidth? Type1094 { get; set; } + public global::OpenAI.RealtimeError? Type1094 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1095 { get; set; } + public global::OpenAI.RealtimeErrorType? Type1095 { get; set; } /// /// /// - public global::OpenAI.UsageAudioSpeechesGroupByItem? Type1096 { get; set; } + public global::OpenAI.RealtimeSessionCreated? Type1096 { get; set; } /// /// /// - public global::OpenAI.UsageAudioTranscriptionsBucketWidth? Type1097 { get; set; } + public global::OpenAI.RealtimeSessionCreatedType? Type1097 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1098 { get; set; } + public global::OpenAI.RealtimeSessionUpdated? Type1098 { get; set; } /// /// /// - public global::OpenAI.UsageAudioTranscriptionsGroupByItem? Type1099 { get; set; } + public global::OpenAI.RealtimeSessionUpdatedType? Type1099 { get; set; } /// /// /// - public global::OpenAI.UsageCodeInterpreterSessionsBucketWidth? Type1100 { get; set; } + public global::OpenAI.RealtimeConversationCreated? Type1100 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1101 { get; set; } + public global::OpenAI.RealtimeConversationCreatedType? Type1101 { get; set; } /// /// /// - public global::OpenAI.UsageCodeInterpreterSessionsGroupByItem? Type1102 { get; set; } + public global::OpenAI.RealtimeConversationItemCreated? Type1102 { get; set; } /// /// /// - public global::OpenAI.UsageCompletionsBucketWidth? Type1103 { get; set; } + public global::OpenAI.RealtimeConversationItemCreatedType? Type1103 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1104 { get; set; } + public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionCompleted? 
Type1104 { get; set; } /// /// /// - public global::OpenAI.UsageCompletionsGroupByItem? Type1105 { get; set; } + public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionCompletedType? Type1105 { get; set; } /// /// /// - public global::OpenAI.UsageEmbeddingsBucketWidth? Type1106 { get; set; } + public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionFailed? Type1106 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1107 { get; set; } + public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionFailedType? Type1107 { get; set; } /// /// /// - public global::OpenAI.UsageEmbeddingsGroupByItem? Type1108 { get; set; } + public global::OpenAI.RealtimeConversationItemTruncated? Type1108 { get; set; } /// /// /// - public global::OpenAI.UsageImagesBucketWidth? Type1109 { get; set; } + public global::OpenAI.RealtimeConversationItemTruncatedType? Type1109 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1110 { get; set; } + public global::OpenAI.RealtimeConversationItemDeleted? Type1110 { get; set; } /// /// /// - public global::OpenAI.UsageImagesSource? Type1111 { get; set; } + public global::OpenAI.RealtimeConversationItemDeletedType? Type1111 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1112 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferCommitted? Type1112 { get; set; } /// /// /// - public global::OpenAI.UsageImagesSize? Type1113 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferCommittedType? Type1113 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1114 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferCleared? Type1114 { get; set; } /// /// /// - public global::OpenAI.UsageImagesGroupByItem? Type1115 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferClearedType? Type1115 { get; set; } /// /// /// - public global::OpenAI.UsageModerationsBucketWidth? 
Type1116 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferSpeechStarted? Type1116 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1117 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferSpeechStartedType? Type1117 { get; set; } /// /// /// - public global::OpenAI.UsageModerationsGroupByItem? Type1118 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferSpeechStopped? Type1118 { get; set; } /// /// /// - public global::OpenAI.UsageVectorStoresBucketWidth? Type1119 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferSpeechStoppedType? Type1119 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1120 { get; set; } + public global::OpenAI.RealtimeResponseCreated? Type1120 { get; set; } /// /// /// - public global::OpenAI.UsageVectorStoresGroupByItem? Type1121 { get; set; } + public global::OpenAI.RealtimeResponseCreatedType? Type1121 { get; set; } /// /// /// - public global::OpenAI.ListMessagesOrder? Type1122 { get; set; } + public global::OpenAI.RealtimeResponseDone? Type1122 { get; set; } /// /// /// - public global::OpenAI.ListRunsOrder? Type1123 { get; set; } + public global::OpenAI.RealtimeResponseDoneType? Type1123 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1124 { get; set; } + public global::OpenAI.RealtimeResponseOutputItemAdded? Type1124 { get; set; } /// /// /// - public global::OpenAI.CreateRunIncludeItem? Type1125 { get; set; } + public global::OpenAI.RealtimeResponseOutputItemAddedType? Type1125 { get; set; } /// /// /// - public global::OpenAI.ListRunStepsOrder? Type1126 { get; set; } + public global::OpenAI.RealtimeResponseOutputItemDone? Type1126 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1127 { get; set; } + public global::OpenAI.RealtimeResponseOutputItemDoneType? Type1127 { get; set; } /// /// /// - public global::OpenAI.ListRunStepsIncludeItem? 
Type1128 { get; set; } + public global::OpenAI.RealtimeResponseContentPartAdded? Type1128 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type1129 { get; set; } + public global::OpenAI.RealtimeResponseContentPartAddedType? Type1129 { get; set; } /// /// /// - public global::OpenAI.GetRunStepIncludeItem? Type1130 { get; set; } + public global::OpenAI.RealtimeResponseContentPartDone? Type1130 { get; set; } /// /// /// - public global::OpenAI.ListVectorStoresOrder? Type1131 { get; set; } + public global::OpenAI.RealtimeResponseContentPartDoneType? Type1131 { get; set; } /// /// /// - public global::OpenAI.ListFilesInVectorStoreBatchOrder? Type1132 { get; set; } + public global::OpenAI.RealtimeResponseTextDelta? Type1132 { get; set; } /// /// /// - public global::OpenAI.ListFilesInVectorStoreBatchFilter? Type1133 { get; set; } + public global::OpenAI.RealtimeResponseTextDeltaType? Type1133 { get; set; } /// /// /// - public global::OpenAI.ListVectorStoreFilesOrder? Type1134 { get; set; } + public global::OpenAI.RealtimeResponseTextDone? Type1134 { get; set; } /// /// /// - public global::OpenAI.ListVectorStoreFilesFilter? Type1135 { get; set; } + public global::OpenAI.RealtimeResponseTextDoneType? Type1135 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type1136 { get; set; } + public global::OpenAI.RealtimeResponseAudioTranscriptDelta? Type1136 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type1137 { get; set; } + public global::OpenAI.RealtimeResponseAudioTranscriptDeltaType? Type1137 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseAudioTranscriptDone? Type1138 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseAudioTranscriptDoneType? Type1139 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseAudioDelta? Type1140 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseAudioDeltaType? 
Type1141 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseAudioDone? Type1142 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseAudioDoneType? Type1143 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseFunctionCallArgumentsDelta? Type1144 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseFunctionCallArgumentsDeltaType? Type1145 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseFunctionCallArgumentsDone? Type1146 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseFunctionCallArgumentsDoneType? Type1147 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeRateLimitsUpdated? Type1148 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeRateLimitsUpdatedType? Type1149 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1150 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeRateLimitsUpdatedRateLimit? Type1151 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeRateLimitsUpdatedRateLimitName? Type1152 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeServerEvent? Type1153 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeServerEventDiscriminator? Type1154 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeServerEventDiscriminatorType? Type1155 { get; set; } + /// + /// + /// + public global::OpenAI.CreateBatchRequest? Type1156 { get; set; } + /// + /// + /// + public global::OpenAI.CreateBatchRequestEndpoint? Type1157 { get; set; } + /// + /// + /// + public global::OpenAI.CreateBatchRequestCompletionWindow? Type1158 { get; set; } + /// + /// + /// + public global::OpenAI.ListAssistantsOrder? Type1159 { get; set; } + /// + /// + /// + public global::OpenAI.ListFilesOrder? Type1160 { get; set; } + /// + /// + /// + public global::OpenAI.ListAuditLogsEffectiveAt? 
Type1161 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1162 { get; set; } + /// + /// + /// + public global::OpenAI.UsageCostsBucketWidth? Type1163 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1164 { get; set; } + /// + /// + /// + public global::OpenAI.UsageCostsGroupByItem? Type1165 { get; set; } + /// + /// + /// + public global::OpenAI.UsageAudioSpeechesBucketWidth? Type1166 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1167 { get; set; } + /// + /// + /// + public global::OpenAI.UsageAudioSpeechesGroupByItem? Type1168 { get; set; } + /// + /// + /// + public global::OpenAI.UsageAudioTranscriptionsBucketWidth? Type1169 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1170 { get; set; } + /// + /// + /// + public global::OpenAI.UsageAudioTranscriptionsGroupByItem? Type1171 { get; set; } + /// + /// + /// + public global::OpenAI.UsageCodeInterpreterSessionsBucketWidth? Type1172 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1173 { get; set; } + /// + /// + /// + public global::OpenAI.UsageCodeInterpreterSessionsGroupByItem? Type1174 { get; set; } + /// + /// + /// + public global::OpenAI.UsageCompletionsBucketWidth? Type1175 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1176 { get; set; } + /// + /// + /// + public global::OpenAI.UsageCompletionsGroupByItem? Type1177 { get; set; } + /// + /// + /// + public global::OpenAI.UsageEmbeddingsBucketWidth? Type1178 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1179 { get; set; } + /// + /// + /// + public global::OpenAI.UsageEmbeddingsGroupByItem? Type1180 { get; set; } + /// + /// + /// + public global::OpenAI.UsageImagesBucketWidth? Type1181 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? 
Type1182 { get; set; } + /// + /// + /// + public global::OpenAI.UsageImagesSource? Type1183 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1184 { get; set; } + /// + /// + /// + public global::OpenAI.UsageImagesSize? Type1185 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1186 { get; set; } + /// + /// + /// + public global::OpenAI.UsageImagesGroupByItem? Type1187 { get; set; } + /// + /// + /// + public global::OpenAI.UsageModerationsBucketWidth? Type1188 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1189 { get; set; } + /// + /// + /// + public global::OpenAI.UsageModerationsGroupByItem? Type1190 { get; set; } + /// + /// + /// + public global::OpenAI.UsageVectorStoresBucketWidth? Type1191 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1192 { get; set; } + /// + /// + /// + public global::OpenAI.UsageVectorStoresGroupByItem? Type1193 { get; set; } + /// + /// + /// + public global::OpenAI.ListMessagesOrder? Type1194 { get; set; } + /// + /// + /// + public global::OpenAI.ListRunsOrder? Type1195 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1196 { get; set; } + /// + /// + /// + public global::OpenAI.CreateRunIncludeItem? Type1197 { get; set; } + /// + /// + /// + public global::OpenAI.ListRunStepsOrder? Type1198 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1199 { get; set; } + /// + /// + /// + public global::OpenAI.ListRunStepsIncludeItem? Type1200 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1201 { get; set; } + /// + /// + /// + public global::OpenAI.GetRunStepIncludeItem? Type1202 { get; set; } + /// + /// + /// + public global::OpenAI.ListVectorStoresOrder? Type1203 { get; set; } + /// + /// + /// + public global::OpenAI.ListFilesInVectorStoreBatchOrder? 
Type1204 { get; set; } + /// + /// + /// + public global::OpenAI.ListFilesInVectorStoreBatchFilter? Type1205 { get; set; } + /// + /// + /// + public global::OpenAI.ListVectorStoreFilesOrder? Type1206 { get; set; } + /// + /// + /// + public global::OpenAI.ListVectorStoreFilesFilter? Type1207 { get; set; } + /// + /// + /// + public global::OpenAI.OneOf? Type1208 { get; set; } + /// + /// + /// + public global::OpenAI.OneOf? Type1209 { get; set; } } } \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.ChatClient.CreateChatCompletion.g.cs b/src/libs/OpenAI/Generated/OpenAI.ChatClient.CreateChatCompletion.g.cs index 4a371be8..a621c6ba 100644 --- a/src/libs/OpenAI/Generated/OpenAI.ChatClient.CreateChatCompletion.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.ChatClient.CreateChatCompletion.g.cs @@ -24,7 +24,12 @@ partial void ProcessCreateChatCompletionResponseContent( /// /// Creates a model response for the given chat conversation. Learn more in the
/// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
- /// and [audio](/docs/guides/audio) guides. + /// and [audio](/docs/guides/audio) guides.
+ /// Parameter support can differ depending on the model used to generate the
+ /// response, particularly for newer reasoning models. Parameters that are only
+ /// supported for reasoning models are noted below. For the current state of
+ /// unsupported parameters in reasoning models,
+ /// [refer to the reasoning guide](/docs/guides/reasoning). ///
/// /// The token to cancel the operation with @@ -162,7 +167,12 @@ partial void ProcessCreateChatCompletionResponseContent( /// /// Creates a model response for the given chat conversation. Learn more in the
/// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
- /// and [audio](/docs/guides/audio) guides. + /// and [audio](/docs/guides/audio) guides.
+ /// Parameter support can differ depending on the model used to generate the
+ /// response, particularly for newer reasoning models. Parameters that are only
+ /// supported for reasoning models are noted below. For the current state of
+ /// unsupported parameters in reasoning models,
+ /// [refer to the reasoning guide](/docs/guides/reasoning). ///
/// /// A list of messages comprising the conversation so far. Depending on the
@@ -175,29 +185,49 @@ partial void ProcessCreateChatCompletionResponseContent( /// Example: gpt-4o /// /// - /// Whether or not to store the output of this chat completion request
- /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Whether or not to store the output of this chat completion request for
+ /// use in our [model distillation](/docs/guides/distillation) or
+ /// [evals](/docs/guides/evals) products.
/// Default Value: false /// + /// + /// **o1 models only**
+ /// Constrains effort on reasoning for
+ /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+ /// Currently supported values are `low`, `medium`, and `high`. Reducing
+ /// reasoning effort can result in faster responses and fewer tokens used
+ /// on reasoning in a response.
+ /// Default Value: medium + /// /// /// Developer-defined tags and values used for filtering completions
/// in the [dashboard](https://platform.openai.com/chat-completions). /// /// - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ /// their existing frequency in the text so far, decreasing the model's
+ /// likelihood to repeat the same line verbatim.
/// Default Value: 0 /// /// /// Modify the likelihood of specified tokens appearing in the completion.
- /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the
+ /// tokenizer) to an associated bias value from -100 to 100. Mathematically,
+ /// the bias is added to the logits generated by the model prior to sampling.
+ /// The exact effect will vary per model, but values between -1 and 1 should
+ /// decrease or increase likelihood of selection; values like -100 or 100
+ /// should result in a ban or exclusive selection of the relevant token. /// /// - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.
+ /// Whether to return log probabilities of the output tokens or not. If true,
+ /// returns the log probabilities of each output token returned in the
+ /// `content` of `message`.
/// Default Value: false /// /// - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to
+ /// return at each token position, each with an associated log probability.
+ /// `logprobs` must be set to `true` if this parameter is used. /// /// /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). @@ -227,15 +257,26 @@ partial void ProcessCreateChatCompletionResponseContent( /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). /// /// - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ /// whether they appear in the text so far, increasing the model's likelihood
+ /// to talk about new topics.
/// Default Value: 0 /// /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
- /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// An object specifying the format that the model must output.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables
+ /// Structured Outputs which ensures the model will match your supplied JSON
+ /// schema. Learn more in the [Structured Outputs
+ /// guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures
+ /// the message the model generates is valid JSON.
+ /// **Important:** when using JSON mode, you **must** also instruct the model
+ /// to produce JSON yourself via a system or user message. Without this, the
+ /// model may generate an unending stream of whitespace until the generation
+ /// reaches the token limit, resulting in a long-running and seemingly "stuck"
+ /// request. Also note that the message content may be partially cut off if
+ /// `finish_reason="length"`, which indicates the generation exceeded
+ /// `max_tokens` or the conversation exceeded the max context length. /// /// /// This feature is in Beta.
@@ -268,7 +309,10 @@ partial void ProcessCreateChatCompletionResponseContent( /// Example: 1 /// /// - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// An alternative to sampling with temperature, called nucleus sampling,
+ /// where the model considers the results of the tokens with top_p probability
+ /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
+ /// are considered.
/// We generally recommend altering this or `temperature` but not both.
/// Default Value: 1
/// Example: 1 @@ -297,6 +341,7 @@ partial void ProcessCreateChatCompletionResponseContent( global::System.Collections.Generic.IList messages, global::OpenAI.AnyOf model, bool? store = default, + global::OpenAI.CreateChatCompletionRequestReasoningEffort? reasoningEffort = default, global::System.Collections.Generic.Dictionary? metadata = default, double? frequencyPenalty = default, global::System.Collections.Generic.Dictionary? logitBias = default, @@ -327,6 +372,7 @@ partial void ProcessCreateChatCompletionResponseContent( Messages = messages, Model = model, Store = store, + ReasoningEffort = reasoningEffort, Metadata = metadata, FrequencyPenalty = frequencyPenalty, LogitBias = logitBias, diff --git a/src/libs/OpenAI/Generated/OpenAI.FineTuningClient.CreateFineTuningJob.g.cs b/src/libs/OpenAI/Generated/OpenAI.FineTuningClient.CreateFineTuningJob.g.cs index f4c64fe0..d6f62498 100644 --- a/src/libs/OpenAI/Generated/OpenAI.FineTuningClient.CreateFineTuningJob.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.FineTuningClient.CreateFineTuningJob.g.cs @@ -173,13 +173,10 @@ partial void ProcessCreateFineTuningJobResponseContent( /// The ID of an uploaded file that contains training data.
/// See [upload file](/docs/api-reference/files/create) for how to upload a file.
/// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.
- /// The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format.
+ /// The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input), [completions](/docs/api-reference/fine-tuning/completions-input) format, or if the fine-tuning method uses the [preference](/docs/api-reference/fine-tuning/preference-input) format.
/// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
/// Example: file-abc123 /// - /// - /// The hyperparameters used for the fine-tuning job. - /// /// /// A string of up to 64 characters that will be added to your fine-tuned model name.
/// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @@ -202,27 +199,30 @@ partial void ProcessCreateFineTuningJobResponseContent( /// If a seed is not specified, one will be generated for you.
/// Example: 42 /// + /// + /// The method used for fine-tuning. + /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task CreateFineTuningJobAsync( global::OpenAI.AnyOf model, string trainingFile, - global::OpenAI.CreateFineTuningJobRequestHyperparameters? hyperparameters = default, string? suffix = default, string? validationFile = default, global::System.Collections.Generic.IList? integrations = default, int? seed = default, + global::OpenAI.FineTuneMethod? method = default, global::System.Threading.CancellationToken cancellationToken = default) { var __request = new global::OpenAI.CreateFineTuningJobRequest { Model = model, TrainingFile = trainingFile, - Hyperparameters = hyperparameters, Suffix = suffix, ValidationFile = validationFile, Integrations = integrations, Seed = seed, + Method = method, }; return await CreateFineTuningJobAsync( diff --git a/src/libs/OpenAI/Generated/OpenAI.IChatClient.CreateChatCompletion.g.cs b/src/libs/OpenAI/Generated/OpenAI.IChatClient.CreateChatCompletion.g.cs index 95ed3ab8..f79688fc 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IChatClient.CreateChatCompletion.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IChatClient.CreateChatCompletion.g.cs @@ -7,7 +7,12 @@ public partial interface IChatClient /// /// Creates a model response for the given chat conversation. Learn more in the
/// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
- /// and [audio](/docs/guides/audio) guides. + /// and [audio](/docs/guides/audio) guides.
+ /// Parameter support can differ depending on the model used to generate the
+ /// response, particularly for newer reasoning models. Parameters that are only
+ /// supported for reasoning models are noted below. For the current state of
+ /// unsupported parameters in reasoning models,
+ /// [refer to the reasoning guide](/docs/guides/reasoning). ///
/// /// The token to cancel the operation with @@ -19,7 +24,12 @@ public partial interface IChatClient /// /// Creates a model response for the given chat conversation. Learn more in the
/// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
- /// and [audio](/docs/guides/audio) guides. + /// and [audio](/docs/guides/audio) guides.
+ /// Parameter support can differ depending on the model used to generate the
+ /// response, particularly for newer reasoning models. Parameters that are only
+ /// supported for reasoning models are noted below. For the current state of
+ /// unsupported parameters in reasoning models,
+ /// [refer to the reasoning guide](/docs/guides/reasoning). ///
/// /// A list of messages comprising the conversation so far. Depending on the
@@ -32,29 +42,49 @@ public partial interface IChatClient /// Example: gpt-4o /// /// - /// Whether or not to store the output of this chat completion request
- /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Whether or not to store the output of this chat completion request for
+ /// use in our [model distillation](/docs/guides/distillation) or
+ /// [evals](/docs/guides/evals) products.
/// Default Value: false /// + /// + /// **o1 models only**
+ /// Constrains effort on reasoning for
+ /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+ /// Currently supported values are `low`, `medium`, and `high`. Reducing
+ /// reasoning effort can result in faster responses and fewer tokens used
+ /// on reasoning in a response.
+ /// Default Value: medium + /// /// /// Developer-defined tags and values used for filtering completions
/// in the [dashboard](https://platform.openai.com/chat-completions). /// /// - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ /// their existing frequency in the text so far, decreasing the model's
+ /// likelihood to repeat the same line verbatim.
/// Default Value: 0 /// /// /// Modify the likelihood of specified tokens appearing in the completion.
- /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the
+ /// tokenizer) to an associated bias value from -100 to 100. Mathematically,
+ /// the bias is added to the logits generated by the model prior to sampling.
+ /// The exact effect will vary per model, but values between -1 and 1 should
+ /// decrease or increase likelihood of selection; values like -100 or 100
+ /// should result in a ban or exclusive selection of the relevant token. /// /// - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.
+ /// Whether to return log probabilities of the output tokens or not. If true,
+ /// returns the log probabilities of each output token returned in the
+ /// `content` of `message`.
/// Default Value: false /// /// - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to
+ /// return at each token position, each with an associated log probability.
+ /// `logprobs` must be set to `true` if this parameter is used. /// /// /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). @@ -84,15 +114,26 @@ public partial interface IChatClient /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). /// /// - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ /// whether they appear in the text so far, increasing the model's likelihood
+ /// to talk about new topics.
/// Default Value: 0 /// /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
- /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// An object specifying the format that the model must output.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables
+ /// Structured Outputs which ensures the model will match your supplied JSON
+ /// schema. Learn more in the [Structured Outputs
+ /// guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures
+ /// the message the model generates is valid JSON.
+ /// **Important:** when using JSON mode, you **must** also instruct the model
+ /// to produce JSON yourself via a system or user message. Without this, the
+ /// model may generate an unending stream of whitespace until the generation
+ /// reaches the token limit, resulting in a long-running and seemingly "stuck"
+ /// request. Also note that the message content may be partially cut off if
+ /// `finish_reason="length"`, which indicates the generation exceeded
+ /// `max_tokens` or the conversation exceeded the max context length. /// /// /// This feature is in Beta.
@@ -125,7 +166,10 @@ public partial interface IChatClient /// Example: 1 /// /// - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// An alternative to sampling with temperature, called nucleus sampling,
+ /// where the model considers the results of the tokens with top_p probability
+ /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
+ /// are considered.
/// We generally recommend altering this or `temperature` but not both.
/// Default Value: 1
/// Example: 1 @@ -154,6 +198,7 @@ public partial interface IChatClient global::System.Collections.Generic.IList messages, global::OpenAI.AnyOf model, bool? store = default, + global::OpenAI.CreateChatCompletionRequestReasoningEffort? reasoningEffort = default, global::System.Collections.Generic.Dictionary? metadata = default, double? frequencyPenalty = default, global::System.Collections.Generic.Dictionary? logitBias = default, diff --git a/src/libs/OpenAI/Generated/OpenAI.IFineTuningClient.CreateFineTuningJob.g.cs b/src/libs/OpenAI/Generated/OpenAI.IFineTuningClient.CreateFineTuningJob.g.cs index bbc75d8f..de73f7d2 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IFineTuningClient.CreateFineTuningJob.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IFineTuningClient.CreateFineTuningJob.g.cs @@ -30,13 +30,10 @@ public partial interface IFineTuningClient /// The ID of an uploaded file that contains training data.
/// See [upload file](/docs/api-reference/files/create) for how to upload a file.
/// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.
- /// The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format.
+ /// The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input), [completions](/docs/api-reference/fine-tuning/completions-input) format, or if the fine-tuning method uses the [preference](/docs/api-reference/fine-tuning/preference-input) format.
/// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
/// Example: file-abc123 /// - /// - /// The hyperparameters used for the fine-tuning job. - /// /// /// A string of up to 64 characters that will be added to your fine-tuned model name.
/// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @@ -59,16 +56,19 @@ public partial interface IFineTuningClient /// If a seed is not specified, one will be generated for you.
/// Example: 42 /// + /// + /// The method used for fine-tuning. + /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task CreateFineTuningJobAsync( global::OpenAI.AnyOf model, string trainingFile, - global::OpenAI.CreateFineTuningJobRequestHyperparameters? hyperparameters = default, string? suffix = default, string? validationFile = default, global::System.Collections.Generic.IList? integrations = default, int? seed = default, + global::OpenAI.FineTuneMethod? method = default, global::System.Threading.CancellationToken cancellationToken = default); } } \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.IOpenAiApi.g.cs b/src/libs/OpenAI/Generated/OpenAI.IOpenAiApi.g.cs index 80d7f819..76bf5655 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IOpenAiApi.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IOpenAiApi.g.cs @@ -122,6 +122,11 @@ public partial interface IOpenAiApi : global::System.IDisposable /// public UsersClient Users { get; } + /// + /// + /// + public RealtimeClient Realtime { get; } + /// /// /// diff --git a/src/libs/OpenAI/Generated/OpenAI.IRealtimeClient.CreateRealtimeSession.g.cs b/src/libs/OpenAI/Generated/OpenAI.IRealtimeClient.CreateRealtimeSession.g.cs new file mode 100644 index 00000000..063970ae --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.IRealtimeClient.CreateRealtimeSession.g.cs @@ -0,0 +1,107 @@ +#nullable enable + +namespace OpenAI +{ + public partial interface IRealtimeClient + { + /// + /// Create an ephemeral API token for use in client-side applications with the
+ /// Realtime API. Can be configured with the same session parameters as the
+ /// `session.update` client event.
+ /// It responds with a session object, plus a `client_secret` key which contains
+ /// a usable ephemeral API token that can be used to authenticate browser clients
+ /// for the Realtime API. + ///
+ /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task CreateRealtimeSessionAsync( + global::OpenAI.RealtimeSessionCreateRequest request, + global::System.Threading.CancellationToken cancellationToken = default); + + /// + /// Create an ephemeral API token for use in client-side applications with the
+ /// Realtime API. Can be configured with the same session parameters as the
+ /// `session.update` client event.
+ /// It responds with a session object, plus a `client_secret` key which contains
+ /// a usable ephemeral API token that can be used to authenticate browser clients
+ /// for the Realtime API. + ///
+ /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + /// + /// + /// The Realtime model used for this session. + /// + /// + /// The default system instructions (i.e. system message) prepended to model
+ /// calls. This field allows the client to guide the model on desired
+ /// responses. The model can be instructed on response content and format,
+ /// (e.g. "be extremely succinct", "act friendly", "here are examples of good
+ /// responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ /// into your voice", "laugh frequently"). The instructions are not guaranteed
+ /// to be followed by the model, but they provide guidance to the model on the
+ /// desired behavior.
+ /// Note that the server sets default instructions which will be used if this
+ /// field is not set and are visible in the `session.created` event at the
+ /// start of the session. + /// + /// + /// The voice the model uses to respond. Voice cannot be changed during the
+ /// session once the model has responded with audio at least once. Current
+ /// voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`,
+ /// `shimmer` and `verse`. + /// + /// + /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + /// + /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + /// + /// Configuration for input audio transcription, defaults to off and can be
+ /// set to `null` to turn off once on. Input audio transcription is not native
+ /// to the model, since the model consumes audio directly. Transcription runs
+ /// asynchronously through Whisper and should be treated as rough guidance
+ /// rather than the representation understood by the model. + /// + /// + /// Configuration for turn detection. Can be set to `null` to turn off. Server
+ /// VAD means that the model will detect the start and end of speech based on
+ /// audio volume and respond at the end of user speech. + /// + /// + /// Tools (functions) available to the model. + /// + /// + /// How the model chooses tools. Options are `auto`, `none`, `required`, or
+ /// specify a function. + /// + /// + /// Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + /// + /// + /// Maximum number of output tokens for a single assistant response,
+ /// inclusive of tool calls. Provide an integer between 1 and 4096 to
+ /// limit output tokens, or `inf` for the maximum available tokens for a
+ /// given model. Defaults to `inf`. + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task CreateRealtimeSessionAsync( + global::OpenAI.RealtimeSessionCreateRequestModel model, + global::System.Collections.Generic.IList? modalities = default, + string? instructions = default, + global::OpenAI.RealtimeSessionCreateRequestVoice? voice = default, + global::OpenAI.RealtimeSessionCreateRequestInputAudioFormat? inputAudioFormat = default, + global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormat? outputAudioFormat = default, + global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription? inputAudioTranscription = default, + global::OpenAI.RealtimeSessionCreateRequestTurnDetection? turnDetection = default, + global::System.Collections.Generic.IList? tools = default, + string? toolChoice = default, + double? temperature = default, + global::OpenAI.OneOf? maxResponseOutputTokens = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.IRealtimeClient.g.cs b/src/libs/OpenAI/Generated/OpenAI.IRealtimeClient.g.cs new file mode 100644 index 00000000..2f5fcd44 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.IRealtimeClient.g.cs @@ -0,0 +1,40 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// If no httpClient is provided, a new one will be created.
+ /// If no baseUri is provided, the default baseUri from OpenAPI spec will be used. + ///
+ public partial interface IRealtimeClient : global::System.IDisposable + { + /// + /// The HttpClient instance. + /// + public global::System.Net.Http.HttpClient HttpClient { get; } + + /// + /// The base URL for the API. + /// + public System.Uri? BaseUri { get; } + + /// + /// The authorizations to use for the requests. + /// + public global::System.Collections.Generic.List Authorizations { get; } + + /// + /// Gets or sets a value indicating whether the response content should be read as a string. + /// True by default in debug builds, false otherwise. + /// + public bool ReadResponseAsString { get; set; } + + /// + /// + /// + global::System.Text.Json.Serialization.JsonSerializerContext JsonSerializerContext { get; set; } + + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessage.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessage.g.cs index 8b5bafc9..b9bee35c 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessage.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessage.g.cs @@ -6,7 +6,7 @@ namespace OpenAI { /// - /// + /// Messages sent by the model in response to user messages. /// public sealed partial class ChatCompletionRequestAssistantMessage { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestDeveloperMessage.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestDeveloperMessage.Json.g.cs new file mode 100644 index 00000000..3f3d8780 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestDeveloperMessage.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class ChatCompletionRequestDeveloperMessage + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.ChatCompletionRequestDeveloperMessage? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.ChatCompletionRequestDeveloperMessage), + jsonSerializerContext) as global::OpenAI.ChatCompletionRequestDeveloperMessage; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.ChatCompletionRequestDeveloperMessage? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.ChatCompletionRequestDeveloperMessage), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.ChatCompletionRequestDeveloperMessage; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestDeveloperMessage.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestDeveloperMessage.g.cs new file mode 100644 index 00000000..dde977bc --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestDeveloperMessage.g.cs @@ -0,0 +1,72 @@ + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace OpenAI +{ + /// + /// Developer-provided instructions that the model should follow, regardless of
+ /// messages sent by the user. With o1 models and newer, `developer` messages
+ /// replace the previous `system` messages. + ///
+ public sealed partial class ChatCompletionRequestDeveloperMessage + { + /// + /// The contents of the developer message. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter>))] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.OneOf> Content { get; set; } + + /// + /// The role of the messages author, in this case `developer`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("role")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.ChatCompletionRequestDeveloperMessageRoleJsonConverter))] + public global::OpenAI.ChatCompletionRequestDeveloperMessageRole Role { get; set; } + + /// + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("name")] + public string? Name { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The contents of the developer message. + /// + /// + /// The role of the messages author, in this case `developer`. + /// + /// + /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public ChatCompletionRequestDeveloperMessage( + global::OpenAI.OneOf> content, + global::OpenAI.ChatCompletionRequestDeveloperMessageRole role, + string? 
name) + { + this.Content = content; + this.Role = role; + this.Name = name; + } + + /// + /// Initializes a new instance of the class. + /// + public ChatCompletionRequestDeveloperMessage() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestDeveloperMessageRole.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestDeveloperMessageRole.g.cs new file mode 100644 index 00000000..f2060b55 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestDeveloperMessageRole.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The role of the messages author, in this case `developer`. + /// + public enum ChatCompletionRequestDeveloperMessageRole + { + /// + /// + /// + Developer, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ChatCompletionRequestDeveloperMessageRoleExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ChatCompletionRequestDeveloperMessageRole value) + { + return value switch + { + ChatCompletionRequestDeveloperMessageRole.Developer => "developer", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ChatCompletionRequestDeveloperMessageRole? 
ToEnum(string value) + { + return value switch + { + "developer" => ChatCompletionRequestDeveloperMessageRole.Developer, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessage.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessage.g.cs index 2aa90b51..c0bab645 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessage.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessage.g.cs @@ -14,9 +14,48 @@ namespace OpenAI /// public global::OpenAI.ChatCompletionRequestMessageDiscriminatorRole? Role { get; } + /// + /// Developer-provided instructions that the model should follow, regardless of
+ /// messages sent by the user. With o1 models and newer, `developer` messages
+ /// replace the previous `system` messages. + ///
+#if NET6_0_OR_GREATER + public global::OpenAI.ChatCompletionRequestDeveloperMessage? Developer { get; init; } +#else + public global::OpenAI.ChatCompletionRequestDeveloperMessage? Developer { get; } +#endif + /// /// /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Developer))] +#endif + public bool IsDeveloper => Developer != null; + + /// + /// + /// + public static implicit operator ChatCompletionRequestMessage(global::OpenAI.ChatCompletionRequestDeveloperMessage value) => new ChatCompletionRequestMessage(value); + + /// + /// + /// + public static implicit operator global::OpenAI.ChatCompletionRequestDeveloperMessage?(ChatCompletionRequestMessage @this) => @this.Developer; + + /// + /// + /// + public ChatCompletionRequestMessage(global::OpenAI.ChatCompletionRequestDeveloperMessage? value) + { + Developer = value; + } + + /// + /// Developer-provided instructions that the model should follow, regardless of
+ /// messages sent by the user. With o1 models and newer, use `developer` messages
+ /// for this purpose instead. + ///
#if NET6_0_OR_GREATER public global::OpenAI.ChatCompletionRequestSystemMessage? System { get; init; } #else @@ -50,7 +89,8 @@ public ChatCompletionRequestMessage(global::OpenAI.ChatCompletionRequestSystemMe } /// - /// + /// Messages sent by an end user, containing prompts or additional context
+ /// information. ///
#if NET6_0_OR_GREATER public global::OpenAI.ChatCompletionRequestUserMessage? User { get; init; } @@ -85,7 +125,7 @@ public ChatCompletionRequestMessage(global::OpenAI.ChatCompletionRequestUserMess } /// - /// + /// Messages sent by the model in response to user messages. /// #if NET6_0_OR_GREATER public global::OpenAI.ChatCompletionRequestAssistantMessage? Assistant { get; init; } @@ -194,6 +234,7 @@ public ChatCompletionRequestMessage(global::OpenAI.ChatCompletionRequestFunction /// public ChatCompletionRequestMessage( global::OpenAI.ChatCompletionRequestMessageDiscriminatorRole? role, + global::OpenAI.ChatCompletionRequestDeveloperMessage? developer, global::OpenAI.ChatCompletionRequestSystemMessage? system, global::OpenAI.ChatCompletionRequestUserMessage? user, global::OpenAI.ChatCompletionRequestAssistantMessage? assistant, @@ -203,6 +244,7 @@ public ChatCompletionRequestMessage( { Role = role; + Developer = developer; System = system; User = user; Assistant = assistant; @@ -218,7 +260,8 @@ Function as object ?? Tool as object ?? Assistant as object ?? User as object ?? - System as object + System as object ?? 
+ Developer as object ; /// @@ -226,13 +269,14 @@ System as object /// public bool Validate() { - return IsSystem && !IsUser && !IsAssistant && !IsTool && !IsFunction || !IsSystem && IsUser && !IsAssistant && !IsTool && !IsFunction || !IsSystem && !IsUser && IsAssistant && !IsTool && !IsFunction || !IsSystem && !IsUser && !IsAssistant && IsTool && !IsFunction || !IsSystem && !IsUser && !IsAssistant && !IsTool && IsFunction; + return IsDeveloper && !IsSystem && !IsUser && !IsAssistant && !IsTool && !IsFunction || !IsDeveloper && IsSystem && !IsUser && !IsAssistant && !IsTool && !IsFunction || !IsDeveloper && !IsSystem && IsUser && !IsAssistant && !IsTool && !IsFunction || !IsDeveloper && !IsSystem && !IsUser && IsAssistant && !IsTool && !IsFunction || !IsDeveloper && !IsSystem && !IsUser && !IsAssistant && IsTool && !IsFunction || !IsDeveloper && !IsSystem && !IsUser && !IsAssistant && !IsTool && IsFunction; } /// /// /// public TResult? Match( + global::System.Func? developer = null, global::System.Func? system = null, global::System.Func? user = null, global::System.Func? assistant = null, @@ -245,7 +289,11 @@ public bool Validate() Validate(); } - if (IsSystem && system != null) + if (IsDeveloper && developer != null) + { + return developer(Developer!); + } + else if (IsSystem && system != null) { return system(System!); } @@ -273,6 +321,7 @@ public bool Validate() /// /// public void Match( + global::System.Action? developer = null, global::System.Action? system = null, global::System.Action? user = null, global::System.Action? 
assistant = null, @@ -285,7 +334,11 @@ public void Match( Validate(); } - if (IsSystem) + if (IsDeveloper) + { + developer?.Invoke(Developer!); + } + else if (IsSystem) { system?.Invoke(System!); } @@ -314,6 +367,8 @@ public override int GetHashCode() { var fields = new object?[] { + Developer, + typeof(global::OpenAI.ChatCompletionRequestDeveloperMessage), System, typeof(global::OpenAI.ChatCompletionRequestSystemMessage), User, @@ -340,6 +395,7 @@ static int HashCodeAggregator(int hashCode, object? value) => value == null public bool Equals(ChatCompletionRequestMessage other) { return + global::System.Collections.Generic.EqualityComparer.Default.Equals(Developer, other.Developer) && global::System.Collections.Generic.EqualityComparer.Default.Equals(System, other.System) && global::System.Collections.Generic.EqualityComparer.Default.Equals(User, other.User) && global::System.Collections.Generic.EqualityComparer.Default.Equals(Assistant, other.Assistant) && diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageDiscriminatorRole.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageDiscriminatorRole.g.cs index 7571d707..1d32c71c 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageDiscriminatorRole.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageDiscriminatorRole.g.cs @@ -8,6 +8,10 @@ namespace OpenAI /// public enum ChatCompletionRequestMessageDiscriminatorRole { + /// + /// + /// + Developer, /// /// /// @@ -42,6 +46,7 @@ public static string ToValueString(this ChatCompletionRequestMessageDiscriminato { return value switch { + ChatCompletionRequestMessageDiscriminatorRole.Developer => "developer", ChatCompletionRequestMessageDiscriminatorRole.System => "system", ChatCompletionRequestMessageDiscriminatorRole.User => "user", ChatCompletionRequestMessageDiscriminatorRole.Assistant => "assistant", @@ -57,6 +62,7 @@ public static string ToValueString(this 
ChatCompletionRequestMessageDiscriminato { return value switch { + "developer" => ChatCompletionRequestMessageDiscriminatorRole.Developer, "system" => ChatCompletionRequestMessageDiscriminatorRole.System, "user" => ChatCompletionRequestMessageDiscriminatorRole.User, "assistant" => ChatCompletionRequestMessageDiscriminatorRole.Assistant, diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestSystemMessage.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestSystemMessage.g.cs index dbe698d1..539c4df2 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestSystemMessage.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestSystemMessage.g.cs @@ -6,7 +6,9 @@ namespace OpenAI { /// - /// + /// Developer-provided instructions that the model should follow, regardless of
+ /// messages sent by the user. With o1 models and newer, use `developer` messages
+ /// for this purpose instead. ///
public sealed partial class ChatCompletionRequestSystemMessage { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestUserMessage.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestUserMessage.g.cs index 630d8039..89c92d9a 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestUserMessage.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestUserMessage.g.cs @@ -6,7 +6,8 @@ namespace OpenAI { /// - /// + /// Messages sent by an end user, containing prompts or additional context
+ /// information. ///
public sealed partial class ChatCompletionRequestUserMessage { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequest.g.cs index a3b6b633..d5a91507 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequest.g.cs @@ -31,13 +31,27 @@ public sealed partial class CreateChatCompletionRequest public required global::OpenAI.AnyOf Model { get; set; } /// - /// Whether or not to store the output of this chat completion request
- /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Whether or not to store the output of this chat completion request for
+ /// use in our [model distillation](/docs/guides/distillation) or
+ /// [evals](/docs/guides/evals) products.
/// Default Value: false ///
[global::System.Text.Json.Serialization.JsonPropertyName("store")] public bool? Store { get; set; } + /// + /// **o1 models only**
+ /// Constrains effort on reasoning for
+ /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+ /// Currently supported values are `low`, `medium`, and `high`. Reducing
+ /// reasoning effort can result in faster responses and fewer tokens used
+ /// on reasoning in a response.
+ /// Default Value: medium + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("reasoning_effort")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestReasoningEffortJsonConverter))] + public global::OpenAI.CreateChatCompletionRequestReasoningEffort? ReasoningEffort { get; set; } + /// /// Developer-defined tags and values used for filtering completions
/// in the [dashboard](https://platform.openai.com/chat-completions). @@ -46,8 +60,9 @@ public sealed partial class CreateChatCompletionRequest public global::System.Collections.Generic.Dictionary? Metadata { get; set; } /// - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ /// their existing frequency in the text so far, decreasing the model's
+ /// likelihood to repeat the same line verbatim.
/// Default Value: 0 ///
[global::System.Text.Json.Serialization.JsonPropertyName("frequency_penalty")] @@ -55,27 +70,39 @@ public sealed partial class CreateChatCompletionRequest /// /// Modify the likelihood of specified tokens appearing in the completion.
- /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the
+ /// tokenizer) to an associated bias value from -100 to 100. Mathematically,
+ /// the bias is added to the logits generated by the model prior to sampling.
+ /// The exact effect will vary per model, but values between -1 and 1 should
+ /// decrease or increase likelihood of selection; values like -100 or 100
+ /// should result in a ban or exclusive selection of the relevant token. ///
[global::System.Text.Json.Serialization.JsonPropertyName("logit_bias")] public global::System.Collections.Generic.Dictionary? LogitBias { get; set; } /// - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.
+ /// Whether to return log probabilities of the output tokens or not. If true,
+ /// returns the log probabilities of each output token returned in the
+ /// `content` of `message`.
/// Default Value: false ///
[global::System.Text.Json.Serialization.JsonPropertyName("logprobs")] public bool? Logprobs { get; set; } /// - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to
+ /// return at each token position, each with an associated log probability.
+ /// `logprobs` must be set to `true` if this parameter is used. ///
[global::System.Text.Json.Serialization.JsonPropertyName("top_logprobs")] public int? TopLogprobs { get; set; } /// - /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API.
- /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o1 series models](/docs/guides/reasoning). + /// The maximum number of [tokens](/tokenizer) that can be generated in the
+ /// chat completion. This value can be used to control
+ /// [costs](https://openai.com/api/pricing/) for text generated via API.
+ /// This value is now deprecated in favor of `max_completion_tokens`, and is
+ /// not compatible with [o1 series models](/docs/guides/reasoning). ///
[global::System.Text.Json.Serialization.JsonPropertyName("max_tokens")] [global::System.Obsolete("This property marked as deprecated.")] @@ -125,18 +152,29 @@ public sealed partial class CreateChatCompletionRequest public global::OpenAI.CreateChatCompletionRequestAudio? Audio { get; set; } /// - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ /// whether they appear in the text so far, increasing the model's likelihood
+ /// to talk about new topics.
/// Default Value: 0 ///
[global::System.Text.Json.Serialization.JsonPropertyName("presence_penalty")] public double? PresencePenalty { get; set; } /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
- /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// An object specifying the format that the model must output.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables
+ /// Structured Outputs which ensures the model will match your supplied JSON
+ /// schema. Learn more in the [Structured Outputs
+ /// guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures
+ /// the message the model generates is valid JSON.
+ /// **Important:** when using JSON mode, you **must** also instruct the model
+ /// to produce JSON yourself via a system or user message. Without this, the
+ /// model may generate an unending stream of whitespace until the generation
+ /// reaches the token limit, resulting in a long-running and seemingly "stuck"
+ /// request. Also note that the message content may be partially cut off if
+ /// `finish_reason="length"`, which indicates the generation exceeded
+ /// `max_tokens` or the conversation exceeded the max context length. ///
[global::System.Text.Json.Serialization.JsonPropertyName("response_format")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.ResponseFormatJsonConverter))] @@ -194,7 +232,10 @@ public sealed partial class CreateChatCompletionRequest public double? Temperature { get; set; } /// - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// An alternative to sampling with temperature, called nucleus sampling,
+ /// where the model considers the results of the tokens with top_p probability
+ /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
+ /// are considered.
/// We generally recommend altering this or `temperature` but not both.
/// Default Value: 1
/// Example: 1 @@ -238,10 +279,14 @@ public sealed partial class CreateChatCompletionRequest /// /// Deprecated in favor of `tool_choice`.
/// Controls which (if any) function is called by the model.
- /// `none` means the model will not call a function and instead generates a message.
- /// `auto` means the model can pick between generating a message or calling a function.
- /// Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
- /// `none` is the default when no functions are present. `auto` is the default if functions are present. + /// `none` means the model will not call a function and instead generates a
+ /// message.
+ /// `auto` means the model can pick between generating a message or calling a
+ /// function.
+ /// Specifying a particular function via `{"name": "my_function"}` forces the
+ /// model to call that function.
+ /// `none` is the default when no functions are present. `auto` is the default
+ /// if functions are present. ///
[global::System.Text.Json.Serialization.JsonPropertyName("function_call")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] @@ -275,29 +320,49 @@ public sealed partial class CreateChatCompletionRequest /// Example: gpt-4o /// /// - /// Whether or not to store the output of this chat completion request
- /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Whether or not to store the output of this chat completion request for
+ /// use in our [model distillation](/docs/guides/distillation) or
+ /// [evals](/docs/guides/evals) products.
/// Default Value: false /// + /// + /// **o1 models only**
+ /// Constrains effort on reasoning for
+ /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+ /// Currently supported values are `low`, `medium`, and `high`. Reducing
+ /// reasoning effort can result in faster responses and fewer tokens used
+ /// on reasoning in a response.
+ /// Default Value: medium + /// /// /// Developer-defined tags and values used for filtering completions
/// in the [dashboard](https://platform.openai.com/chat-completions). /// /// - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ /// their existing frequency in the text so far, decreasing the model's
+ /// likelihood to repeat the same line verbatim.
/// Default Value: 0 /// /// /// Modify the likelihood of specified tokens appearing in the completion.
- /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the
+ /// tokenizer) to an associated bias value from -100 to 100. Mathematically,
+ /// the bias is added to the logits generated by the model prior to sampling.
+ /// The exact effect will vary per model, but values between -1 and 1 should
+ /// decrease or increase likelihood of selection; values like -100 or 100
+ /// should result in a ban or exclusive selection of the relevant token. /// /// - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.
+ /// Whether to return log probabilities of the output tokens or not. If true,
+ /// returns the log probabilities of each output token returned in the
+ /// `content` of `message`.
/// Default Value: false /// /// - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to
+ /// return at each token position, each with an associated log probability.
+ /// `logprobs` must be set to `true` if this parameter is used. /// /// /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). @@ -327,15 +392,26 @@ public sealed partial class CreateChatCompletionRequest /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). /// /// - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ /// whether they appear in the text so far, increasing the model's likelihood
+ /// to talk about new topics.
/// Default Value: 0 /// /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
- /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// An object specifying the format that the model must output.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables
+ /// Structured Outputs which ensures the model will match your supplied JSON
+ /// schema. Learn more in the [Structured Outputs
+ /// guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures
+ /// the message the model generates is valid JSON.
+ /// **Important:** when using JSON mode, you **must** also instruct the model
+ /// to produce JSON yourself via a system or user message. Without this, the
+ /// model may generate an unending stream of whitespace until the generation
+ /// reaches the token limit, resulting in a long-running and seemingly "stuck"
+ /// request. Also note that the message content may be partially cut off if
+ /// `finish_reason="length"`, which indicates the generation exceeded
+ /// `max_tokens` or the conversation exceeded the max context length. /// /// /// This feature is in Beta.
@@ -368,7 +444,10 @@ public sealed partial class CreateChatCompletionRequest /// Example: 1 /// /// - /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// An alternative to sampling with temperature, called nucleus sampling,
+ /// where the model considers the results of the tokens with top_p probability
+ /// mass. So 0.1 means only the tokens comprising the top 10% probability mass
+ /// are considered.
/// We generally recommend altering this or `temperature` but not both.
/// Default Value: 1
/// Example: 1 @@ -396,6 +475,7 @@ public CreateChatCompletionRequest( global::System.Collections.Generic.IList messages, global::OpenAI.AnyOf model, bool? store, + global::OpenAI.CreateChatCompletionRequestReasoningEffort? reasoningEffort, global::System.Collections.Generic.Dictionary? metadata, double? frequencyPenalty, global::System.Collections.Generic.Dictionary? logitBias, @@ -423,6 +503,7 @@ public CreateChatCompletionRequest( this.Messages = messages ?? throw new global::System.ArgumentNullException(nameof(messages)); this.Model = model; this.Store = store; + this.ReasoningEffort = reasoningEffort; this.Metadata = metadata; this.FrequencyPenalty = frequencyPenalty; this.LogitBias = logitBias; diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestLogitBias.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestLogitBias.g.cs index 84943f55..8de836a4 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestLogitBias.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestLogitBias.g.cs @@ -5,7 +5,12 @@ namespace OpenAI { /// /// Modify the likelihood of specified tokens appearing in the completion.
- /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Accepts a JSON object that maps tokens (specified by their token ID in the
+ /// tokenizer) to an associated bias value from -100 to 100. Mathematically,
+ /// the bias is added to the logits generated by the model prior to sampling.
+ /// The exact effect will vary per model, but values between -1 and 1 should
+ /// decrease or increase likelihood of selection; values like -100 or 100
+ /// should result in a ban or exclusive selection of the relevant token. ///
public sealed partial class CreateChatCompletionRequestLogitBias { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestModel.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestModel.g.cs index 386b03f3..b1e28d9c 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestModel.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestModel.g.cs @@ -8,6 +8,14 @@ namespace OpenAI ///
public enum CreateChatCompletionRequestModel { + /// + /// + /// + O1, + /// + /// + /// + O120241217, /// /// /// @@ -43,19 +51,23 @@ public enum CreateChatCompletionRequestModel /// /// /// - Gpt4oRealtimePreview, + Gpt4oAudioPreview, /// /// /// - Gpt4oRealtimePreview20241001, + Gpt4oAudioPreview20241001, /// /// /// - Gpt4oAudioPreview, + Gpt4oAudioPreview20241217, /// /// /// - Gpt4oAudioPreview20241001, + Gpt4oMiniAudioPreview, + /// + /// + /// + Gpt4oMiniAudioPreview20241217, /// /// /// @@ -158,6 +170,8 @@ public static string ToValueString(this CreateChatCompletionRequestModel value) { return value switch { + CreateChatCompletionRequestModel.O1 => "o1", + CreateChatCompletionRequestModel.O120241217 => "o1-2024-12-17", CreateChatCompletionRequestModel.O1Preview => "o1-preview", CreateChatCompletionRequestModel.O1Preview20240912 => "o1-preview-2024-09-12", CreateChatCompletionRequestModel.O1Mini => "o1-mini", @@ -166,10 +180,11 @@ public static string ToValueString(this CreateChatCompletionRequestModel value) CreateChatCompletionRequestModel.Gpt4o20241120 => "gpt-4o-2024-11-20", CreateChatCompletionRequestModel.Gpt4o20240806 => "gpt-4o-2024-08-06", CreateChatCompletionRequestModel.Gpt4o20240513 => "gpt-4o-2024-05-13", - CreateChatCompletionRequestModel.Gpt4oRealtimePreview => "gpt-4o-realtime-preview", - CreateChatCompletionRequestModel.Gpt4oRealtimePreview20241001 => "gpt-4o-realtime-preview-2024-10-01", CreateChatCompletionRequestModel.Gpt4oAudioPreview => "gpt-4o-audio-preview", CreateChatCompletionRequestModel.Gpt4oAudioPreview20241001 => "gpt-4o-audio-preview-2024-10-01", + CreateChatCompletionRequestModel.Gpt4oAudioPreview20241217 => "gpt-4o-audio-preview-2024-12-17", + CreateChatCompletionRequestModel.Gpt4oMiniAudioPreview => "gpt-4o-mini-audio-preview", + CreateChatCompletionRequestModel.Gpt4oMiniAudioPreview20241217 => "gpt-4o-mini-audio-preview-2024-12-17", CreateChatCompletionRequestModel.Chatgpt4oLatest => "chatgpt-4o-latest", 
CreateChatCompletionRequestModel.Gpt4oMini => "gpt-4o-mini", CreateChatCompletionRequestModel.Gpt4oMini20240718 => "gpt-4o-mini-2024-07-18", @@ -202,6 +217,8 @@ public static string ToValueString(this CreateChatCompletionRequestModel value) { return value switch { + "o1" => CreateChatCompletionRequestModel.O1, + "o1-2024-12-17" => CreateChatCompletionRequestModel.O120241217, "o1-preview" => CreateChatCompletionRequestModel.O1Preview, "o1-preview-2024-09-12" => CreateChatCompletionRequestModel.O1Preview20240912, "o1-mini" => CreateChatCompletionRequestModel.O1Mini, @@ -210,10 +227,11 @@ public static string ToValueString(this CreateChatCompletionRequestModel value) "gpt-4o-2024-11-20" => CreateChatCompletionRequestModel.Gpt4o20241120, "gpt-4o-2024-08-06" => CreateChatCompletionRequestModel.Gpt4o20240806, "gpt-4o-2024-05-13" => CreateChatCompletionRequestModel.Gpt4o20240513, - "gpt-4o-realtime-preview" => CreateChatCompletionRequestModel.Gpt4oRealtimePreview, - "gpt-4o-realtime-preview-2024-10-01" => CreateChatCompletionRequestModel.Gpt4oRealtimePreview20241001, "gpt-4o-audio-preview" => CreateChatCompletionRequestModel.Gpt4oAudioPreview, "gpt-4o-audio-preview-2024-10-01" => CreateChatCompletionRequestModel.Gpt4oAudioPreview20241001, + "gpt-4o-audio-preview-2024-12-17" => CreateChatCompletionRequestModel.Gpt4oAudioPreview20241217, + "gpt-4o-mini-audio-preview" => CreateChatCompletionRequestModel.Gpt4oMiniAudioPreview, + "gpt-4o-mini-audio-preview-2024-12-17" => CreateChatCompletionRequestModel.Gpt4oMiniAudioPreview20241217, "chatgpt-4o-latest" => CreateChatCompletionRequestModel.Chatgpt4oLatest, "gpt-4o-mini" => CreateChatCompletionRequestModel.Gpt4oMini, "gpt-4o-mini-2024-07-18" => CreateChatCompletionRequestModel.Gpt4oMini20240718, diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestReasoningEffort.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestReasoningEffort.g.cs new file mode 100644 index 00000000..8c7ca931 
--- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestReasoningEffort.g.cs @@ -0,0 +1,63 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// **o1 models only**
+ /// Constrains effort on reasoning for
+ /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+ /// Currently supported values are `low`, `medium`, and `high`. Reducing
+ /// reasoning effort can result in faster responses and fewer tokens used
+ /// on reasoning in a response.
+ /// Default Value: medium + ///
+ public enum CreateChatCompletionRequestReasoningEffort + { + /// + /// + /// + Low, + /// + /// + /// + Medium, + /// + /// + /// + High, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateChatCompletionRequestReasoningEffortExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateChatCompletionRequestReasoningEffort value) + { + return value switch + { + CreateChatCompletionRequestReasoningEffort.Low => "low", + CreateChatCompletionRequestReasoningEffort.Medium => "medium", + CreateChatCompletionRequestReasoningEffort.High => "high", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateChatCompletionRequestReasoningEffort? ToEnum(string value) + { + return value switch + { + "low" => CreateChatCompletionRequestReasoningEffort.Low, + "medium" => CreateChatCompletionRequestReasoningEffort.Medium, + "high" => CreateChatCompletionRequestReasoningEffort.High, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateFineTuningJobRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateFineTuningJobRequest.g.cs index fae5b695..ffcf598f 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateFineTuningJobRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateFineTuningJobRequest.g.cs @@ -25,7 +25,7 @@ public sealed partial class CreateFineTuningJobRequest /// The ID of an uploaded file that contains training data.
/// See [upload file](/docs/api-reference/files/create) for how to upload a file.
/// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.
- /// The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format.
+ /// The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input), [completions](/docs/api-reference/fine-tuning/completions-input) format, or if the fine-tuning method uses the [preference](/docs/api-reference/fine-tuning/preference-input) format.
/// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
/// Example: file-abc123 ///
@@ -35,9 +35,11 @@ public sealed partial class CreateFineTuningJobRequest public required string TrainingFile { get; set; } /// - /// The hyperparameters used for the fine-tuning job. + /// The hyperparameters used for the fine-tuning job.
+ /// This value is now deprecated in favor of `method`, and should be passed in under the `method` parameter. ///
[global::System.Text.Json.Serialization.JsonPropertyName("hyperparameters")] + [global::System.Obsolete("This property marked as deprecated.")] public global::OpenAI.CreateFineTuningJobRequestHyperparameters? Hyperparameters { get; set; } /// @@ -76,6 +78,12 @@ public sealed partial class CreateFineTuningJobRequest [global::System.Text.Json.Serialization.JsonPropertyName("seed")] public int? Seed { get; set; } + /// + /// The method used for fine-tuning. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("method")] + public global::OpenAI.FineTuneMethod? Method { get; set; } + /// /// Additional properties that are not explicitly defined in the schema /// @@ -94,13 +102,10 @@ public sealed partial class CreateFineTuningJobRequest /// The ID of an uploaded file that contains training data.
/// See [upload file](/docs/api-reference/files/create) for how to upload a file.
/// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.
- /// The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format.
+ /// The contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input), [completions](/docs/api-reference/fine-tuning/completions-input) format, or if the fine-tuning method uses the [preference](/docs/api-reference/fine-tuning/preference-input) format.
/// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
/// Example: file-abc123 /// - /// - /// The hyperparameters used for the fine-tuning job. - /// /// /// A string of up to 64 characters that will be added to your fine-tuned model name.
/// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. @@ -123,23 +128,26 @@ public sealed partial class CreateFineTuningJobRequest /// If a seed is not specified, one will be generated for you.
/// Example: 42 /// + /// + /// The method used for fine-tuning. + /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CreateFineTuningJobRequest( global::OpenAI.AnyOf model, string trainingFile, - global::OpenAI.CreateFineTuningJobRequestHyperparameters? hyperparameters, string? suffix, string? validationFile, global::System.Collections.Generic.IList? integrations, - int? seed) + int? seed, + global::OpenAI.FineTuneMethod? method) { this.Model = model; this.TrainingFile = trainingFile ?? throw new global::System.ArgumentNullException(nameof(trainingFile)); - this.Hyperparameters = hyperparameters; this.Suffix = suffix; this.ValidationFile = validationFile; this.Integrations = integrations; this.Seed = seed; + this.Method = method; } /// diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateFineTuningJobRequestHyperparameters.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateFineTuningJobRequestHyperparameters.g.cs index 8e257553..dbf4cba6 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateFineTuningJobRequestHyperparameters.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateFineTuningJobRequestHyperparameters.g.cs @@ -6,8 +6,10 @@ namespace OpenAI { /// - /// The hyperparameters used for the fine-tuning job. + /// The hyperparameters used for the fine-tuning job.
+ /// This value is now deprecated in favor of `method`, and should be passed in under the `method` parameter. ///
+ [global::System.Obsolete("This model marked as deprecated.")] public sealed partial class CreateFineTuningJobRequestHyperparameters { /// @@ -17,6 +19,7 @@ public sealed partial class CreateFineTuningJobRequestHyperparameters /// [global::System.Text.Json.Serialization.JsonPropertyName("batch_size")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + [global::System.Obsolete("This property marked as deprecated.")] public global::OpenAI.OneOf? BatchSize { get; set; } /// @@ -26,6 +29,7 @@ public sealed partial class CreateFineTuningJobRequestHyperparameters /// [global::System.Text.Json.Serialization.JsonPropertyName("learning_rate_multiplier")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + [global::System.Obsolete("This property marked as deprecated.")] public global::OpenAI.OneOf? LearningRateMultiplier { get; set; } /// @@ -35,6 +39,7 @@ public sealed partial class CreateFineTuningJobRequestHyperparameters /// [global::System.Text.Json.Serialization.JsonPropertyName("n_epochs")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + [global::System.Obsolete("This property marked as deprecated.")] public global::OpenAI.OneOf? NEpochs { get; set; } /// diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneChatCompletionRequestAssistantMessage.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneChatCompletionRequestAssistantMessage.g.cs index 68b47d06..6dd0cbc4 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneChatCompletionRequestAssistantMessage.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneChatCompletionRequestAssistantMessage.g.cs @@ -45,7 +45,7 @@ public FineTuneChatCompletionRequestAssistantMessage(global::OpenAI.FineTuneChat } /// - /// + /// Messages sent by the model in response to user messages. 
/// #if NET6_0_OR_GREATER public global::OpenAI.ChatCompletionRequestAssistantMessage? Value2 { get; init; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FinetuneChatRequestInput.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneChatRequestInput.Json.g.cs similarity index 87% rename from src/libs/OpenAI/Generated/OpenAI.Models.FinetuneChatRequestInput.Json.g.cs rename to src/libs/OpenAI/Generated/OpenAI.Models.FineTuneChatRequestInput.Json.g.cs index 46766db9..84c379a6 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.FinetuneChatRequestInput.Json.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneChatRequestInput.Json.g.cs @@ -2,7 +2,7 @@ namespace OpenAI { - public sealed partial class FinetuneChatRequestInput + public sealed partial class FineTuneChatRequestInput { /// /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. @@ -34,14 +34,14 @@ public string ToJson( /// /// Deserializes a JSON string using the provided JsonSerializerContext. /// - public static global::OpenAI.FinetuneChatRequestInput? FromJson( + public static global::OpenAI.FineTuneChatRequestInput? FromJson( string json, global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) { return global::System.Text.Json.JsonSerializer.Deserialize( json, - typeof(global::OpenAI.FinetuneChatRequestInput), - jsonSerializerContext) as global::OpenAI.FinetuneChatRequestInput; + typeof(global::OpenAI.FineTuneChatRequestInput), + jsonSerializerContext) as global::OpenAI.FineTuneChatRequestInput; } /// @@ -51,11 +51,11 @@ public string ToJson( [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] #endif - public static global::OpenAI.FinetuneChatRequestInput? FromJson( + public static global::OpenAI.FineTuneChatRequestInput? FromJson( string json, global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) { - return global::System.Text.Json.JsonSerializer.Deserialize( + return global::System.Text.Json.JsonSerializer.Deserialize( json, jsonSerializerOptions); } @@ -63,14 +63,14 @@ public string ToJson( /// /// Deserializes a JSON stream using the provided JsonSerializerContext. /// - public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( global::System.IO.Stream jsonStream, global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) { return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( jsonStream, - typeof(global::OpenAI.FinetuneChatRequestInput), - jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.FinetuneChatRequestInput; + typeof(global::OpenAI.FineTuneChatRequestInput), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.FineTuneChatRequestInput; } /// @@ -80,11 +80,11 @@ public string ToJson( [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] #endif - public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( global::System.IO.Stream jsonStream, global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) { - return global::System.Text.Json.JsonSerializer.DeserializeAsync( + return global::System.Text.Json.JsonSerializer.DeserializeAsync( jsonStream, jsonSerializerOptions); } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FinetuneChatRequestInput.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneChatRequestInput.g.cs similarity index 91% rename from src/libs/OpenAI/Generated/OpenAI.Models.FinetuneChatRequestInput.g.cs rename to src/libs/OpenAI/Generated/OpenAI.Models.FineTuneChatRequestInput.g.cs index 429c093b..9a882230 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.FinetuneChatRequestInput.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneChatRequestInput.g.cs @@ -6,9 +6,9 @@ namespace OpenAI { /// - /// The per-line training example of a fine-tuning input file for chat models + /// The per-line training example of a fine-tuning input file for chat models using the supervised method. /// - public sealed partial class FinetuneChatRequestInput + public sealed partial class FineTuneChatRequestInput { /// /// @@ -42,7 +42,7 @@ public sealed partial class FinetuneChatRequestInput public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); /// - /// Initializes a new instance of the class. 
+ /// Initializes a new instance of the class. /// /// /// @@ -52,7 +52,7 @@ public sealed partial class FinetuneChatRequestInput /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] - public FinetuneChatRequestInput( + public FineTuneChatRequestInput( global::System.Collections.Generic.IList>? messages, global::System.Collections.Generic.IList? tools, bool? parallelToolCalls) @@ -63,9 +63,9 @@ public FinetuneChatRequestInput( } /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// - public FinetuneChatRequestInput() + public FineTuneChatRequestInput() { } } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FinetuneCompletionRequestInput.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneCompletionRequestInput.Json.g.cs similarity index 87% rename from src/libs/OpenAI/Generated/OpenAI.Models.FinetuneCompletionRequestInput.Json.g.cs rename to src/libs/OpenAI/Generated/OpenAI.Models.FineTuneCompletionRequestInput.Json.g.cs index 2e615c34..83e2da6f 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.FinetuneCompletionRequestInput.Json.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneCompletionRequestInput.Json.g.cs @@ -2,7 +2,7 @@ namespace OpenAI { - public sealed partial class FinetuneCompletionRequestInput + public sealed partial class FineTuneCompletionRequestInput { /// /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. @@ -34,14 +34,14 @@ public string ToJson( /// /// Deserializes a JSON string using the provided JsonSerializerContext. /// - public static global::OpenAI.FinetuneCompletionRequestInput? FromJson( + public static global::OpenAI.FineTuneCompletionRequestInput? 
FromJson( string json, global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) { return global::System.Text.Json.JsonSerializer.Deserialize( json, - typeof(global::OpenAI.FinetuneCompletionRequestInput), - jsonSerializerContext) as global::OpenAI.FinetuneCompletionRequestInput; + typeof(global::OpenAI.FineTuneCompletionRequestInput), + jsonSerializerContext) as global::OpenAI.FineTuneCompletionRequestInput; } /// @@ -51,11 +51,11 @@ public string ToJson( [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] #endif - public static global::OpenAI.FinetuneCompletionRequestInput? FromJson( + public static global::OpenAI.FineTuneCompletionRequestInput? FromJson( string json, global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) { - return global::System.Text.Json.JsonSerializer.Deserialize( + return global::System.Text.Json.JsonSerializer.Deserialize( json, jsonSerializerOptions); } @@ -63,14 +63,14 @@ public string ToJson( /// /// Deserializes a JSON stream using the provided JsonSerializerContext. 
/// - public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( global::System.IO.Stream jsonStream, global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) { return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( jsonStream, - typeof(global::OpenAI.FinetuneCompletionRequestInput), - jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.FinetuneCompletionRequestInput; + typeof(global::OpenAI.FineTuneCompletionRequestInput), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.FineTuneCompletionRequestInput; } /// @@ -80,11 +80,11 @@ public string ToJson( [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] #endif - public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( global::System.IO.Stream jsonStream, global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) { - return global::System.Text.Json.JsonSerializer.DeserializeAsync( + return global::System.Text.Json.JsonSerializer.DeserializeAsync( jsonStream, jsonSerializerOptions); } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FinetuneCompletionRequestInput.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneCompletionRequestInput.g.cs similarity index 85% rename from src/libs/OpenAI/Generated/OpenAI.Models.FinetuneCompletionRequestInput.g.cs rename to src/libs/OpenAI/Generated/OpenAI.Models.FineTuneCompletionRequestInput.g.cs index b576a612..d159c334 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.FinetuneCompletionRequestInput.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneCompletionRequestInput.g.cs @@ -6,7 +6,7 @@ namespace OpenAI /// /// The per-line training example of a fine-tuning input file for completions models /// - public sealed partial class FinetuneCompletionRequestInput + public sealed partial class FineTuneCompletionRequestInput { /// /// The input prompt for this training example. @@ -27,7 +27,7 @@ public sealed partial class FinetuneCompletionRequestInput public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// /// The input prompt for this training example. @@ -36,7 +36,7 @@ public sealed partial class FinetuneCompletionRequestInput /// The desired completion for this training example. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] - public FinetuneCompletionRequestInput( + public FineTuneCompletionRequestInput( string? prompt, string? completion) { @@ -45,9 +45,9 @@ public FinetuneCompletionRequestInput( } /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. 
/// - public FinetuneCompletionRequestInput() + public FineTuneCompletionRequestInput() { } } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethod.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethod.Json.g.cs new file mode 100644 index 00000000..fe2f772e --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethod.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class FineTuneDPOMethod + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.FineTuneDPOMethod? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.FineTuneDPOMethod), + jsonSerializerContext) as global::OpenAI.FineTuneDPOMethod; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.FineTuneDPOMethod? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.FineTuneDPOMethod), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.FineTuneDPOMethod; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethod.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethod.g.cs new file mode 100644 index 00000000..8951c7b3 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethod.g.cs @@ -0,0 +1,43 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Configuration for the DPO fine-tuning method. + /// + public sealed partial class FineTuneDPOMethod + { + /// + /// The hyperparameters used for the fine-tuning job. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("hyperparameters")] + public global::OpenAI.FineTuneDPOMethodHyperparameters? Hyperparameters { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. 
+ /// + /// + /// The hyperparameters used for the fine-tuning job. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public FineTuneDPOMethod( + global::OpenAI.FineTuneDPOMethodHyperparameters? hyperparameters) + { + this.Hyperparameters = hyperparameters; + } + + /// + /// Initializes a new instance of the class. + /// + public FineTuneDPOMethod() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparameters.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparameters.Json.g.cs new file mode 100644 index 00000000..16805e6f --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparameters.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class FineTuneDPOMethodHyperparameters + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.FineTuneDPOMethodHyperparameters? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.FineTuneDPOMethodHyperparameters), + jsonSerializerContext) as global::OpenAI.FineTuneDPOMethodHyperparameters; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.FineTuneDPOMethodHyperparameters? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.FineTuneDPOMethodHyperparameters), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.FineTuneDPOMethodHyperparameters; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparameters.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparameters.g.cs new file mode 100644 index 00000000..63ca1469 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparameters.g.cs @@ -0,0 +1,90 @@ + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace OpenAI +{ + /// + /// The hyperparameters used for the fine-tuning job. 
+ /// + public sealed partial class FineTuneDPOMethodHyperparameters + { + /// + /// The beta value for the DPO method. A higher beta value will increase the weight of the penalty between the policy and reference model.
+ /// Default Value: auto + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("beta")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + public global::OpenAI.OneOf? Beta { get; set; } + + /// + /// Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance.
+ /// Default Value: auto + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("batch_size")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + public global::OpenAI.OneOf? BatchSize { get; set; } + + /// + /// Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting.
+ /// Default Value: auto + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("learning_rate_multiplier")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + public global::OpenAI.OneOf? LearningRateMultiplier { get; set; } + + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
+ /// Default Value: auto + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("n_epochs")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + public global::OpenAI.OneOf? NEpochs { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The beta value for the DPO method. A higher beta value will increase the weight of the penalty between the policy and reference model.
+ /// Default Value: auto + /// + /// + /// Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance.
+ /// Default Value: auto + /// + /// + /// Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting.
+ /// Default Value: auto + /// + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
+ /// Default Value: auto + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public FineTuneDPOMethodHyperparameters( + global::OpenAI.OneOf? beta, + global::OpenAI.OneOf? batchSize, + global::OpenAI.OneOf? learningRateMultiplier, + global::OpenAI.OneOf? nEpochs) + { + this.Beta = beta; + this.BatchSize = batchSize; + this.LearningRateMultiplier = learningRateMultiplier; + this.NEpochs = nEpochs; + } + + /// + /// Initializes a new instance of the class. + /// + public FineTuneDPOMethodHyperparameters() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparametersBatchSize.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparametersBatchSize.g.cs new file mode 100644 index 00000000..bfaa966c --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparametersBatchSize.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum FineTuneDPOMethodHyperparametersBatchSize + { + /// + /// + /// + Auto, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class FineTuneDPOMethodHyperparametersBatchSizeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this FineTuneDPOMethodHyperparametersBatchSize value) + { + return value switch + { + FineTuneDPOMethodHyperparametersBatchSize.Auto => "auto", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static FineTuneDPOMethodHyperparametersBatchSize? 
ToEnum(string value) + { + return value switch + { + "auto" => FineTuneDPOMethodHyperparametersBatchSize.Auto, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparametersBeta.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparametersBeta.g.cs new file mode 100644 index 00000000..996faa6b --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparametersBeta.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum FineTuneDPOMethodHyperparametersBeta + { + /// + /// + /// + Auto, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class FineTuneDPOMethodHyperparametersBetaExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this FineTuneDPOMethodHyperparametersBeta value) + { + return value switch + { + FineTuneDPOMethodHyperparametersBeta.Auto => "auto", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static FineTuneDPOMethodHyperparametersBeta? 
ToEnum(string value) + { + return value switch + { + "auto" => FineTuneDPOMethodHyperparametersBeta.Auto, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparametersLearningRateMultiplier.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparametersLearningRateMultiplier.g.cs new file mode 100644 index 00000000..f884ffe9 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparametersLearningRateMultiplier.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum FineTuneDPOMethodHyperparametersLearningRateMultiplier + { + /// + /// + /// + Auto, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class FineTuneDPOMethodHyperparametersLearningRateMultiplierExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this FineTuneDPOMethodHyperparametersLearningRateMultiplier value) + { + return value switch + { + FineTuneDPOMethodHyperparametersLearningRateMultiplier.Auto => "auto", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static FineTuneDPOMethodHyperparametersLearningRateMultiplier? 
ToEnum(string value) + { + return value switch + { + "auto" => FineTuneDPOMethodHyperparametersLearningRateMultiplier.Auto, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparametersNEpochs.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparametersNEpochs.g.cs new file mode 100644 index 00000000..0996604b --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneDPOMethodHyperparametersNEpochs.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum FineTuneDPOMethodHyperparametersNEpochs + { + /// + /// + /// + Auto, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class FineTuneDPOMethodHyperparametersNEpochsExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this FineTuneDPOMethodHyperparametersNEpochs value) + { + return value switch + { + FineTuneDPOMethodHyperparametersNEpochs.Auto => "auto", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static FineTuneDPOMethodHyperparametersNEpochs? ToEnum(string value) + { + return value switch + { + "auto" => FineTuneDPOMethodHyperparametersNEpochs.Auto, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneMethod.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneMethod.Json.g.cs new file mode 100644 index 00000000..bf2be6df --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneMethod.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class FineTuneMethod + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.FineTuneMethod? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.FineTuneMethod), + jsonSerializerContext) as global::OpenAI.FineTuneMethod; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.FineTuneMethod? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.FineTuneMethod), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.FineTuneMethod; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneMethod.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneMethod.g.cs new file mode 100644 index 00000000..9f667276 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneMethod.g.cs @@ -0,0 +1,66 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The method used for fine-tuning. + /// + public sealed partial class FineTuneMethod + { + /// + /// The type of method. Is either `supervised` or `dpo`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.FineTuneMethodTypeJsonConverter))] + public global::OpenAI.FineTuneMethodType? Type { get; set; } + + /// + /// Configuration for the supervised fine-tuning method. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("supervised")] + public global::OpenAI.FineTuneSupervisedMethod? Supervised { get; set; } + + /// + /// Configuration for the DPO fine-tuning method. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("dpo")] + public global::OpenAI.FineTuneDPOMethod? Dpo { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of method. Is either `supervised` or `dpo`. 
+ /// + /// + /// Configuration for the supervised fine-tuning method. + /// + /// + /// Configuration for the DPO fine-tuning method. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public FineTuneMethod( + global::OpenAI.FineTuneMethodType? type, + global::OpenAI.FineTuneSupervisedMethod? supervised, + global::OpenAI.FineTuneDPOMethod? dpo) + { + this.Type = type; + this.Supervised = supervised; + this.Dpo = dpo; + } + + /// + /// Initializes a new instance of the class. + /// + public FineTuneMethod() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneMethodType.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneMethodType.g.cs new file mode 100644 index 00000000..73920ba8 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneMethodType.g.cs @@ -0,0 +1,51 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The type of method. Is either `supervised` or `dpo`. + /// + public enum FineTuneMethodType + { + /// + /// + /// + Supervised, + /// + /// + /// + Dpo, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class FineTuneMethodTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this FineTuneMethodType value) + { + return value switch + { + FineTuneMethodType.Supervised => "supervised", + FineTuneMethodType.Dpo => "dpo", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static FineTuneMethodType? 
ToEnum(string value) + { + return value switch + { + "supervised" => FineTuneMethodType.Supervised, + "dpo" => FineTuneMethodType.Dpo, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTunePreferenceRequestInput.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTunePreferenceRequestInput.Json.g.cs new file mode 100644 index 00000000..fce8ed2d --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTunePreferenceRequestInput.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class FineTunePreferenceRequestInput + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. 
+ /// + public static global::OpenAI.FineTunePreferenceRequestInput? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.FineTunePreferenceRequestInput), + jsonSerializerContext) as global::OpenAI.FineTunePreferenceRequestInput; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.FineTunePreferenceRequestInput? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.FineTunePreferenceRequestInput), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.FineTunePreferenceRequestInput; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTunePreferenceRequestInput.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTunePreferenceRequestInput.g.cs new file mode 100644 index 00000000..1e2058aa --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTunePreferenceRequestInput.g.cs @@ -0,0 +1,65 @@ + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace OpenAI +{ + /// + /// The per-line training example of a fine-tuning input file for chat models using the dpo method. + /// + public sealed partial class FineTunePreferenceRequestInput + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input")] + public global::OpenAI.FineTunePreferenceRequestInputInput? Input { get; set; } + + /// + /// The preferred completion message for the output. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("preferred_completion")] + public global::System.Collections.Generic.IList>? 
PreferredCompletion { get; set; } + + /// + /// The non-preferred completion message for the output. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("non_preferred_completion")] + public global::System.Collections.Generic.IList>? NonPreferredCompletion { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The preferred completion message for the output. + /// + /// + /// The non-preferred completion message for the output. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public FineTunePreferenceRequestInput( + global::OpenAI.FineTunePreferenceRequestInputInput? input, + global::System.Collections.Generic.IList>? preferredCompletion, + global::System.Collections.Generic.IList>? nonPreferredCompletion) + { + this.Input = input; + this.PreferredCompletion = preferredCompletion; + this.NonPreferredCompletion = nonPreferredCompletion; + } + + /// + /// Initializes a new instance of the class. + /// + public FineTunePreferenceRequestInput() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTunePreferenceRequestInputInput.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTunePreferenceRequestInputInput.Json.g.cs new file mode 100644 index 00000000..5a0131ec --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTunePreferenceRequestInputInput.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class FineTunePreferenceRequestInputInput + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.FineTunePreferenceRequestInputInput? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.FineTunePreferenceRequestInputInput), + jsonSerializerContext) as global::OpenAI.FineTunePreferenceRequestInputInput; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.FineTunePreferenceRequestInputInput? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.FineTunePreferenceRequestInputInput), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.FineTunePreferenceRequestInputInput; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTunePreferenceRequestInputInput.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTunePreferenceRequestInputInput.g.cs new file mode 100644 index 00000000..0cf90970 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTunePreferenceRequestInputInput.g.cs @@ -0,0 +1,65 @@ + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public sealed partial class FineTunePreferenceRequestInputInput + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("messages")] + public global::System.Collections.Generic.IList>? Messages { get; set; } + + /// + /// A list of tools the model may generate JSON inputs for. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("tools")] + public global::System.Collections.Generic.IList? Tools { get; set; } + + /// + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("parallel_tool_calls")] + public bool? ParallelToolCalls { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. 
+ /// + /// + /// + /// A list of tools the model may generate JSON inputs for. + /// + /// + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public FineTunePreferenceRequestInputInput( + global::System.Collections.Generic.IList>? messages, + global::System.Collections.Generic.IList? tools, + bool? parallelToolCalls) + { + this.Messages = messages; + this.Tools = tools; + this.ParallelToolCalls = parallelToolCalls; + } + + /// + /// Initializes a new instance of the class. + /// + public FineTunePreferenceRequestInputInput() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethod.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethod.Json.g.cs new file mode 100644 index 00000000..946330c4 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethod.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class FineTuneSupervisedMethod + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.FineTuneSupervisedMethod? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.FineTuneSupervisedMethod), + jsonSerializerContext) as global::OpenAI.FineTuneSupervisedMethod; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.FineTuneSupervisedMethod? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.FineTuneSupervisedMethod), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.FineTuneSupervisedMethod; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethod.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethod.g.cs new file mode 100644 index 00000000..ae4e6919 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethod.g.cs @@ -0,0 +1,43 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Configuration for the supervised fine-tuning method. + /// + public sealed partial class FineTuneSupervisedMethod + { + /// + /// The hyperparameters used for the fine-tuning job. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("hyperparameters")] + public global::OpenAI.FineTuneSupervisedMethodHyperparameters? Hyperparameters { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The hyperparameters used for the fine-tuning job. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public FineTuneSupervisedMethod( + global::OpenAI.FineTuneSupervisedMethodHyperparameters? hyperparameters) + { + this.Hyperparameters = hyperparameters; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public FineTuneSupervisedMethod() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparameters.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparameters.Json.g.cs new file mode 100644 index 00000000..699942b8 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparameters.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class FineTuneSupervisedMethodHyperparameters + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.FineTuneSupervisedMethodHyperparameters? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.FineTuneSupervisedMethodHyperparameters), + jsonSerializerContext) as global::OpenAI.FineTuneSupervisedMethodHyperparameters; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.FineTuneSupervisedMethodHyperparameters? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.FineTuneSupervisedMethodHyperparameters), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.FineTuneSupervisedMethodHyperparameters; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparameters.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparameters.g.cs new file mode 100644 index 00000000..c87b6b08 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparameters.g.cs @@ -0,0 +1,76 @@ + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace OpenAI +{ + /// + /// The hyperparameters used for the fine-tuning job. + /// + public sealed partial class FineTuneSupervisedMethodHyperparameters + { + /// + /// Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance.
+ /// Default Value: auto + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("batch_size")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + public global::OpenAI.OneOf? BatchSize { get; set; } + + /// + /// Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting.
+ /// Default Value: auto + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("learning_rate_multiplier")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + public global::OpenAI.OneOf? LearningRateMultiplier { get; set; } + + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
+ /// Default Value: auto + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("n_epochs")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + public global::OpenAI.OneOf? NEpochs { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance.
+ /// Default Value: auto + /// + /// + /// Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting.
+ /// Default Value: auto + /// + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
+ /// Default Value: auto + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public FineTuneSupervisedMethodHyperparameters( + global::OpenAI.OneOf? batchSize, + global::OpenAI.OneOf? learningRateMultiplier, + global::OpenAI.OneOf? nEpochs) + { + this.BatchSize = batchSize; + this.LearningRateMultiplier = learningRateMultiplier; + this.NEpochs = nEpochs; + } + + /// + /// Initializes a new instance of the class. + /// + public FineTuneSupervisedMethodHyperparameters() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparametersBatchSize.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparametersBatchSize.g.cs new file mode 100644 index 00000000..7bbc0e38 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparametersBatchSize.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum FineTuneSupervisedMethodHyperparametersBatchSize + { + /// + /// + /// + Auto, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class FineTuneSupervisedMethodHyperparametersBatchSizeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this FineTuneSupervisedMethodHyperparametersBatchSize value) + { + return value switch + { + FineTuneSupervisedMethodHyperparametersBatchSize.Auto => "auto", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static FineTuneSupervisedMethodHyperparametersBatchSize? 
ToEnum(string value) + { + return value switch + { + "auto" => FineTuneSupervisedMethodHyperparametersBatchSize.Auto, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparametersLearningRateMultiplier.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparametersLearningRateMultiplier.g.cs new file mode 100644 index 00000000..e1142318 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparametersLearningRateMultiplier.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum FineTuneSupervisedMethodHyperparametersLearningRateMultiplier + { + /// + /// + /// + Auto, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class FineTuneSupervisedMethodHyperparametersLearningRateMultiplierExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this FineTuneSupervisedMethodHyperparametersLearningRateMultiplier value) + { + return value switch + { + FineTuneSupervisedMethodHyperparametersLearningRateMultiplier.Auto => "auto", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static FineTuneSupervisedMethodHyperparametersLearningRateMultiplier? 
ToEnum(string value) + { + return value switch + { + "auto" => FineTuneSupervisedMethodHyperparametersLearningRateMultiplier.Auto, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparametersNEpochs.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparametersNEpochs.g.cs new file mode 100644 index 00000000..d2e041fe --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuneSupervisedMethodHyperparametersNEpochs.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum FineTuneSupervisedMethodHyperparametersNEpochs + { + /// + /// + /// + Auto, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class FineTuneSupervisedMethodHyperparametersNEpochsExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this FineTuneSupervisedMethodHyperparametersNEpochs value) + { + return value switch + { + FineTuneSupervisedMethodHyperparametersNEpochs.Auto => "auto", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static FineTuneSupervisedMethodHyperparametersNEpochs? ToEnum(string value) + { + return value switch + { + "auto" => FineTuneSupervisedMethodHyperparametersNEpochs.Auto, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJob.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJob.g.cs index d719028a..15bc08ea 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJob.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJob.g.cs @@ -48,7 +48,7 @@ public sealed partial class FineTuningJob public required global::System.DateTimeOffset? FinishedAt { get; set; } /// - /// The hyperparameters used for the fine-tuning job. 
See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// The hyperparameters used for the fine-tuning job. This value will only be returned when running `supervised` jobs. /// [global::System.Text.Json.Serialization.JsonPropertyName("hyperparameters")] [global::System.Text.Json.Serialization.JsonRequired] @@ -131,6 +131,12 @@ public sealed partial class FineTuningJob [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.UnixTimestampJsonConverter))] public global::System.DateTimeOffset? EstimatedFinish { get; set; } + /// + /// The method used for fine-tuning. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("method")] + public global::OpenAI.FineTuneMethod? Method { get; set; } + /// /// Additional properties that are not explicitly defined in the schema /// @@ -156,7 +162,7 @@ public sealed partial class FineTuningJob /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. /// /// - /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// The hyperparameters used for the fine-tuning job. This value will only be returned when running `supervised` jobs. /// /// /// The base model that is being fine-tuned. @@ -191,6 +197,9 @@ public sealed partial class FineTuningJob /// /// The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. /// + /// + /// The method used for fine-tuning. + /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public FineTuningJob( string id, @@ -209,7 +218,8 @@ public FineTuningJob( int seed, global::OpenAI.FineTuningJobObject @object, global::System.Collections.Generic.IList>? integrations, - global::System.DateTimeOffset? estimatedFinish) + global::System.DateTimeOffset? 
estimatedFinish, + global::OpenAI.FineTuneMethod? method) { this.Id = id ?? throw new global::System.ArgumentNullException(nameof(id)); this.CreatedAt = createdAt; @@ -228,6 +238,7 @@ public FineTuningJob( this.Object = @object; this.Integrations = integrations; this.EstimatedFinish = estimatedFinish; + this.Method = method; } /// diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEvent.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEvent.g.cs index 671175af..4ef99313 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEvent.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEvent.g.cs @@ -9,21 +9,29 @@ namespace OpenAI public sealed partial class FineTuningJobEvent { /// - /// + /// The object type, which is always "fine_tuning.job.event". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.FineTuningJobEventObjectJsonConverter))] + public global::OpenAI.FineTuningJobEventObject Object { get; set; } + + /// + /// The object identifier. /// [global::System.Text.Json.Serialization.JsonPropertyName("id")] [global::System.Text.Json.Serialization.JsonRequired] public required string Id { get; set; } /// - /// + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. /// [global::System.Text.Json.Serialization.JsonPropertyName("created_at")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.UnixTimestampJsonConverter))] [global::System.Text.Json.Serialization.JsonRequired] - public required int CreatedAt { get; set; } + public required global::System.DateTimeOffset CreatedAt { get; set; } /// - /// + /// The log level of the event. 
/// [global::System.Text.Json.Serialization.JsonPropertyName("level")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.FineTuningJobEventLevelJsonConverter))] @@ -31,18 +39,24 @@ public sealed partial class FineTuningJobEvent public required global::OpenAI.FineTuningJobEventLevel Level { get; set; } /// - /// + /// The message of the event. /// [global::System.Text.Json.Serialization.JsonPropertyName("message")] [global::System.Text.Json.Serialization.JsonRequired] public required string Message { get; set; } /// - /// + /// The type of event. /// - [global::System.Text.Json.Serialization.JsonPropertyName("object")] - [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.FineTuningJobEventObjectJsonConverter))] - public global::OpenAI.FineTuningJobEventObject Object { get; set; } + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.FineTuningJobEventTypeJsonConverter))] + public global::OpenAI.FineTuningJobEventType? Type { get; set; } + + /// + /// The data associated with the event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("data")] + public object? Data { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -53,24 +67,44 @@ public sealed partial class FineTuningJobEvent /// /// Initializes a new instance of the class. /// - /// - /// - /// - /// - /// + /// + /// The object type, which is always "fine_tuning.job.event". + /// + /// + /// The object identifier. + /// + /// + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + /// + /// + /// The log level of the event. + /// + /// + /// The message of the event. + /// + /// + /// The type of event. + /// + /// + /// The data associated with the event. 
+ /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public FineTuningJobEvent( string id, - int createdAt, + global::System.DateTimeOffset createdAt, global::OpenAI.FineTuningJobEventLevel level, string message, - global::OpenAI.FineTuningJobEventObject @object) + global::OpenAI.FineTuningJobEventObject @object, + global::OpenAI.FineTuningJobEventType? type, + object? data) { this.Id = id ?? throw new global::System.ArgumentNullException(nameof(id)); this.CreatedAt = createdAt; this.Level = level; this.Message = message ?? throw new global::System.ArgumentNullException(nameof(message)); this.Object = @object; + this.Type = type; + this.Data = data; } /// diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventData.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventData.Json.g.cs new file mode 100644 index 00000000..ac41bf2e --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventData.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class FineTuningJobEventData + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.FineTuningJobEventData? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.FineTuningJobEventData), + jsonSerializerContext) as global::OpenAI.FineTuningJobEventData; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.FineTuningJobEventData? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.FineTuningJobEventData), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.FineTuningJobEventData; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventData.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventData.g.cs new file mode 100644 index 00000000..45b1b2f7 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventData.g.cs @@ -0,0 +1,18 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The data associated with the event. + /// + public sealed partial class FineTuningJobEventData + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventLevel.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventLevel.g.cs index b244c403..485316b4 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventLevel.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventLevel.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// + /// The log level of the event. /// public enum FineTuningJobEventLevel { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventObject.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventObject.g.cs index 5752de25..3d63d266 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventObject.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventObject.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// + /// The object type, which is always "fine_tuning.job.event". 
/// public enum FineTuningJobEventObject { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventType.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventType.g.cs new file mode 100644 index 00000000..7f55c9a0 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobEventType.g.cs @@ -0,0 +1,51 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The type of event. + /// + public enum FineTuningJobEventType + { + /// + /// + /// + Message, + /// + /// + /// + Metrics, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class FineTuningJobEventTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this FineTuningJobEventType value) + { + return value switch + { + FineTuningJobEventType.Message => "message", + FineTuningJobEventType.Metrics => "metrics", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static FineTuningJobEventType? ToEnum(string value) + { + return value switch + { + "message" => FineTuningJobEventType.Message, + "metrics" => FineTuningJobEventType.Metrics, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobHyperparameters.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobHyperparameters.g.cs index faf3715f..1b15903c 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobHyperparameters.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobHyperparameters.g.cs @@ -6,20 +6,36 @@ namespace OpenAI { /// - /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + /// The hyperparameters used for the fine-tuning job. This value will only be returned when running `supervised` jobs. 
/// public sealed partial class FineTuningJobHyperparameters { /// - /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
- /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs.
+ /// Number of examples in each batch. A larger batch size means that model parameters
+ /// are updated less frequently, but with lower variance.
+ /// Default Value: auto + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("batch_size")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + public global::OpenAI.OneOf? BatchSize { get; set; } + + /// + /// Scaling factor for the learning rate. A smaller learning rate may be useful to avoid
+ /// overfitting.
+ /// Default Value: auto + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("learning_rate_multiplier")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + public global::OpenAI.OneOf? LearningRateMultiplier { get; set; } + + /// + /// The number of epochs to train the model for. An epoch refers to one full cycle
+ /// through the training dataset.
/// Default Value: auto ///
- /// global::OpenAI.FineTuningJobHyperparametersNEpochs.Auto [global::System.Text.Json.Serialization.JsonPropertyName("n_epochs")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] - [global::System.Text.Json.Serialization.JsonRequired] - public required global::OpenAI.OneOf NEpochs { get; set; } = global::OpenAI.FineTuningJobHyperparametersNEpochs.Auto; + public global::OpenAI.OneOf? NEpochs { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -30,15 +46,29 @@ public sealed partial class FineTuningJobHyperparameters /// /// Initializes a new instance of the class. /// + /// + /// Number of examples in each batch. A larger batch size means that model parameters
+ /// are updated less frequently, but with lower variance.
+ /// Default Value: auto + /// + /// + /// Scaling factor for the learning rate. A smaller learning rate may be useful to avoid
+ /// overfitting.
+ /// Default Value: auto + /// /// - /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
- /// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs.
+ /// The number of epochs to train the model for. An epoch refers to one full cycle
+ /// through the training dataset.
/// Default Value: auto /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public FineTuningJobHyperparameters( - global::OpenAI.OneOf nEpochs) + global::OpenAI.OneOf? batchSize, + global::OpenAI.OneOf? learningRateMultiplier, + global::OpenAI.OneOf? nEpochs) { + this.BatchSize = batchSize; + this.LearningRateMultiplier = learningRateMultiplier; this.NEpochs = nEpochs; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobHyperparametersBatchSize.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobHyperparametersBatchSize.g.cs new file mode 100644 index 00000000..7df881c8 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobHyperparametersBatchSize.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum FineTuningJobHyperparametersBatchSize + { + /// + /// + /// + Auto, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class FineTuningJobHyperparametersBatchSizeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this FineTuningJobHyperparametersBatchSize value) + { + return value switch + { + FineTuningJobHyperparametersBatchSize.Auto => "auto", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static FineTuningJobHyperparametersBatchSize? 
ToEnum(string value) + { + return value switch + { + "auto" => FineTuningJobHyperparametersBatchSize.Auto, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobHyperparametersLearningRateMultiplier.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobHyperparametersLearningRateMultiplier.g.cs new file mode 100644 index 00000000..2a5462e6 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.FineTuningJobHyperparametersLearningRateMultiplier.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum FineTuningJobHyperparametersLearningRateMultiplier + { + /// + /// + /// + Auto, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class FineTuningJobHyperparametersLearningRateMultiplierExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this FineTuningJobHyperparametersLearningRateMultiplier value) + { + return value switch + { + FineTuningJobHyperparametersLearningRateMultiplier.Auto => "auto", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static FineTuningJobHyperparametersLearningRateMultiplier? 
ToEnum(string value) + { + return value switch + { + "auto" => FineTuningJobHyperparametersLearningRateMultiplier.Auto, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCancel.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCancel.g.cs index 2dcfc829..daced967 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCancel.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCancel.g.cs @@ -23,6 +23,13 @@ public sealed partial class RealtimeClientEventResponseCancel [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.RealtimeClientEventResponseCancelTypeJsonConverter))] public global::OpenAI.RealtimeClientEventResponseCancelType Type { get; set; } + /// + /// A specific response ID to cancel - if not provided, will cancel an
+ /// in-progress response in the default conversation. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + public string? ResponseId { get; set; } + /// /// Additional properties that are not explicitly defined in the schema /// @@ -38,13 +45,19 @@ public sealed partial class RealtimeClientEventResponseCancel /// /// The event type, must be `response.cancel`. /// + /// + /// A specific response ID to cancel - if not provided, will cancel an
+ /// in-progress response in the default conversation. + /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public RealtimeClientEventResponseCancel( string? eventId, - global::OpenAI.RealtimeClientEventResponseCancelType type) + global::OpenAI.RealtimeClientEventResponseCancelType type, + string? responseId) { this.EventId = eventId; this.Type = type; + this.ResponseId = responseId; } /// diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreate.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreate.g.cs index 33f334b6..3563f16b 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreate.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreate.g.cs @@ -33,16 +33,10 @@ public sealed partial class RealtimeClientEventResponseCreate public global::OpenAI.RealtimeClientEventResponseCreateType Type { get; set; } /// - /// A session refers to a single WebSocket connection between a client and the server.
- /// Once a client creates a session, it then sends JSON-formatted events containing text and audio chunks.
- /// The server will respond in kind with audio containing voice output, a text transcript of that voice output,
- /// and function calls (if functions are provided by the client).
- /// A realtime Session represents the overall client-server interaction, and contains default configuration.
- /// It has a set of default values which can be updated at any time (via session.update) or on a per-response level (via response.create). + /// Create a new Realtime response with these parameters ///
[global::System.Text.Json.Serialization.JsonPropertyName("response")] - [global::System.Text.Json.Serialization.JsonRequired] - public required global::OpenAI.RealtimeSession Response { get; set; } + public global::OpenAI.RealtimeResponseCreateParams? Response { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -60,22 +54,17 @@ public sealed partial class RealtimeClientEventResponseCreate /// The event type, must be `response.create`. /// /// - /// A session refers to a single WebSocket connection between a client and the server.
- /// Once a client creates a session, it then sends JSON-formatted events containing text and audio chunks.
- /// The server will respond in kind with audio containing voice output, a text transcript of that voice output,
- /// and function calls (if functions are provided by the client).
- /// A realtime Session represents the overall client-server interaction, and contains default configuration.
- /// It has a set of default values which can be updated at any time (via session.update) or on a per-response level (via response.create). + /// Create a new Realtime response with these parameters /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public RealtimeClientEventResponseCreate( - global::OpenAI.RealtimeSession response, string? eventId, - global::OpenAI.RealtimeClientEventResponseCreateType type) + global::OpenAI.RealtimeClientEventResponseCreateType type, + global::OpenAI.RealtimeResponseCreateParams? response) { - this.Response = response ?? throw new global::System.ArgumentNullException(nameof(response)); this.EventId = eventId; this.Type = type; + this.Response = response; } /// diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdate.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdate.g.cs index 33b41b5e..de7f8523 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdate.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdate.g.cs @@ -27,16 +27,11 @@ public sealed partial class RealtimeClientEventSessionUpdate public global::OpenAI.RealtimeClientEventSessionUpdateType Type { get; set; } /// - /// A session refers to a single WebSocket connection between a client and the server.
- /// Once a client creates a session, it then sends JSON-formatted events containing text and audio chunks.
- /// The server will respond in kind with audio containing voice output, a text transcript of that voice output,
- /// and function calls (if functions are provided by the client).
- /// A realtime Session represents the overall client-server interaction, and contains default configuration.
- /// It has a set of default values which can be updated at any time (via session.update) or on a per-response level (via response.create). + /// Realtime session object configuration. ///
[global::System.Text.Json.Serialization.JsonPropertyName("session")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::OpenAI.RealtimeSession Session { get; set; } + public required global::OpenAI.RealtimeSessionCreateRequest Session { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -54,16 +49,11 @@ public sealed partial class RealtimeClientEventSessionUpdate /// The event type, must be `session.update`. /// /// - /// A session refers to a single WebSocket connection between a client and the server.
- /// Once a client creates a session, it then sends JSON-formatted events containing text and audio chunks.
- /// The server will respond in kind with audio containing voice output, a text transcript of that voice output,
- /// and function calls (if functions are provided by the client).
- /// A realtime Session represents the overall client-server interaction, and contains default configuration.
- /// It has a set of default values which can be updated at any time (via session.update) or on a per-response level (via response.create). + /// Realtime session object configuration. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public RealtimeClientEventSessionUpdate( - global::OpenAI.RealtimeSession session, + global::OpenAI.RealtimeSessionCreateRequest session, string? eventId, global::OpenAI.RealtimeClientEventSessionUpdateType type) { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParams.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParams.Json.g.cs new file mode 100644 index 00000000..46851add --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParams.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class RealtimeResponseCreateParams + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeResponseCreateParams? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeResponseCreateParams), + jsonSerializerContext) as global::OpenAI.RealtimeResponseCreateParams; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeResponseCreateParams? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.RealtimeResponseCreateParams), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeResponseCreateParams; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParams.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParams.g.cs new file mode 100644 index 00000000..f1e60cf5 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParams.g.cs @@ -0,0 +1,213 @@ + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace OpenAI +{ + /// + /// Create a new Realtime response with these parameters + /// + public sealed partial class RealtimeResponseCreateParams + { + /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] + public global::System.Collections.Generic.IList? Modalities { get; set; } + + /// + /// The default system instructions (i.e. system message) prepended to model
+ /// calls. This field allows the client to guide the model on desired
+ /// responses. The model can be instructed on response content and format,
+ /// (e.g. "be extremely succinct", "act friendly", "here are examples of good
+ /// responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ /// into your voice", "laugh frequently"). The instructions are not guaranteed
+ /// to be followed by the model, but they provide guidance to the model on the
+ /// desired behavior.
+ /// Note that the server sets default instructions which will be used if this
+ /// field is not set and are visible in the `session.created` event at the
+ /// start of the session. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("instructions")] + public string? Instructions { get; set; } + + /// + /// The voice the model uses to respond. Voice cannot be changed during the
+ /// session once the model has responded with audio at least once. Current
+ /// voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`,
+ /// `shimmer` and `verse`. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("voice")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsVoiceJsonConverter))] + public global::OpenAI.RealtimeResponseCreateParamsVoice? Voice { get; set; } + + /// + /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_audio_format")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsOutputAudioFormatJsonConverter))] + public global::OpenAI.RealtimeResponseCreateParamsOutputAudioFormat? OutputAudioFormat { get; set; } + + /// + /// Tools (functions) available to the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("tools")] + public global::System.Collections.Generic.IList? Tools { get; set; } + + /// + /// How the model chooses tools. Options are `auto`, `none`, `required`, or
+ /// specify a function, like `{"type": "function", "function": {"name": "my_function"}}`. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("tool_choice")] + public string? ToolChoice { get; set; } + + /// + /// Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("temperature")] + public double? Temperature { get; set; } + + /// + /// Maximum number of output tokens for a single assistant response,
+ /// inclusive of tool calls. Provide an integer between 1 and 4096 to
+ /// limit output tokens, or `inf` for the maximum available tokens for a
+ /// given model. Defaults to `inf`. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("max_response_output_tokens")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + public global::OpenAI.OneOf? MaxResponseOutputTokens { get; set; } + + /// + /// Controls which conversation the response is added to. Currently supports
+ /// `auto` and `none`, with `auto` as the default value. The `auto` value
+ /// means that the contents of the response will be added to the default
+ /// conversation. Set this to `none` to create an out-of-band response which
+ /// will not add items to default conversation. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("conversation")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + public global::OpenAI.OneOf? Conversation { get; set; } + + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be
+ /// useful for storing additional information about the object in a structured
+ /// format. Keys can be a maximum of 64 characters long and values can be a
+ /// maximum of 512 characters long. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] + public object? Metadata { get; set; } + + /// + /// Input items to include in the prompt for the model. Creates a new context
+ /// for this response, without including the default conversation. Can include
+ /// references to items from the default conversation. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("input")] + public global::System.Collections.Generic.IList? Input { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + /// + /// + /// The default system instructions (i.e. system message) prepended to model
+ /// calls. This field allows the client to guide the model on desired
+ /// responses. The model can be instructed on response content and format,
+ /// (e.g. "be extremely succinct", "act friendly", "here are examples of good
+ /// responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ /// into your voice", "laugh frequently"). The instructions are not guaranteed
+ /// to be followed by the model, but they provide guidance to the model on the
+ /// desired behavior.
+ /// Note that the server sets default instructions which will be used if this
+ /// field is not set and are visible in the `session.created` event at the
+ /// start of the session. + /// + /// + /// The voice the model uses to respond. Voice cannot be changed during the
+ /// session once the model has responded with audio at least once. Current
+ /// voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`,
+ /// `shimmer` and `verse`. + /// + /// + /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + /// + /// Tools (functions) available to the model. + /// + /// + /// How the model chooses tools. Options are `auto`, `none`, `required`, or
+ /// specify a function, like `{"type": "function", "function": {"name": "my_function"}}`. + /// + /// + /// Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + /// + /// + /// Maximum number of output tokens for a single assistant response,
+ /// inclusive of tool calls. Provide an integer between 1 and 4096 to
+ /// limit output tokens, or `inf` for the maximum available tokens for a
+ /// given model. Defaults to `inf`. + /// + /// + /// Controls which conversation the response is added to. Currently supports
+ /// `auto` and `none`, with `auto` as the default value. The `auto` value
+ /// means that the contents of the response will be added to the default
+ /// conversation. Set this to `none` to create an out-of-band response which
+ /// will not add items to default conversation. + /// + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be
+ /// useful for storing additional information about the object in a structured
+ /// format. Keys can be a maximum of 64 characters long and values can be a
+ /// maximum of 512 characters long. + /// + /// + /// Input items to include in the prompt for the model. Creates a new context
+ /// for this response, without including the default conversation. Can include
+ /// references to items from the default conversation. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeResponseCreateParams( + global::System.Collections.Generic.IList? modalities, + string? instructions, + global::OpenAI.RealtimeResponseCreateParamsVoice? voice, + global::OpenAI.RealtimeResponseCreateParamsOutputAudioFormat? outputAudioFormat, + global::System.Collections.Generic.IList? tools, + string? toolChoice, + double? temperature, + global::OpenAI.OneOf? maxResponseOutputTokens, + global::OpenAI.OneOf? conversation, + object? metadata, + global::System.Collections.Generic.IList? input) + { + this.Modalities = modalities; + this.Instructions = instructions; + this.Voice = voice; + this.OutputAudioFormat = outputAudioFormat; + this.Tools = tools; + this.ToolChoice = toolChoice; + this.Temperature = temperature; + this.MaxResponseOutputTokens = maxResponseOutputTokens; + this.Conversation = conversation; + this.Metadata = metadata; + this.Input = input; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeResponseCreateParams() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsConversation.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsConversation.g.cs new file mode 100644 index 00000000..2f72d442 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsConversation.g.cs @@ -0,0 +1,51 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Default Value: auto + /// + public enum RealtimeResponseCreateParamsConversation + { + /// + /// + /// + Auto, + /// + /// + /// + None, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeResponseCreateParamsConversationExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this RealtimeResponseCreateParamsConversation value) + { + return value switch + { + RealtimeResponseCreateParamsConversation.Auto => "auto", + RealtimeResponseCreateParamsConversation.None => "none", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeResponseCreateParamsConversation? ToEnum(string value) + { + return value switch + { + "auto" => RealtimeResponseCreateParamsConversation.Auto, + "none" => RealtimeResponseCreateParamsConversation.None, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsMaxResponseOutputTokens.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsMaxResponseOutputTokens.g.cs new file mode 100644 index 00000000..083f68d3 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsMaxResponseOutputTokens.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum RealtimeResponseCreateParamsMaxResponseOutputTokens + { + /// + /// + /// + Inf, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeResponseCreateParamsMaxResponseOutputTokensExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeResponseCreateParamsMaxResponseOutputTokens value) + { + return value switch + { + RealtimeResponseCreateParamsMaxResponseOutputTokens.Inf => "inf", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeResponseCreateParamsMaxResponseOutputTokens? 
ToEnum(string value) + { + return value switch + { + "inf" => RealtimeResponseCreateParamsMaxResponseOutputTokens.Inf, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsMetadata.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsMetadata.Json.g.cs new file mode 100644 index 00000000..eabef3cc --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsMetadata.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class RealtimeResponseCreateParamsMetadata + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. 
+ /// + public static global::OpenAI.RealtimeResponseCreateParamsMetadata? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeResponseCreateParamsMetadata), + jsonSerializerContext) as global::OpenAI.RealtimeResponseCreateParamsMetadata; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeResponseCreateParamsMetadata? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.RealtimeResponseCreateParamsMetadata), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeResponseCreateParamsMetadata; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsMetadata.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsMetadata.g.cs new file mode 100644 index 00000000..c22f73ff --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsMetadata.g.cs @@ -0,0 +1,21 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Set of 16 key-value pairs that can be attached to an object. This can be
+ /// useful for storing additional information about the object in a structured
+ /// format. Keys can be a maximum of 64 characters long and values can be a
+ /// maximum of 512 characters long. + ///
+ public sealed partial class RealtimeResponseCreateParamsMetadata + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsModalitie.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsModalitie.g.cs new file mode 100644 index 00000000..2808ee94 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsModalitie.g.cs @@ -0,0 +1,51 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum RealtimeResponseCreateParamsModalitie + { + /// + /// + /// + Text, + /// + /// + /// + Audio, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeResponseCreateParamsModalitieExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeResponseCreateParamsModalitie value) + { + return value switch + { + RealtimeResponseCreateParamsModalitie.Text => "text", + RealtimeResponseCreateParamsModalitie.Audio => "audio", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeResponseCreateParamsModalitie? 
ToEnum(string value) + { + return value switch + { + "text" => RealtimeResponseCreateParamsModalitie.Text, + "audio" => RealtimeResponseCreateParamsModalitie.Audio, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsOutputAudioFormat.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsOutputAudioFormat.g.cs new file mode 100644 index 00000000..4d3cf69e --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsOutputAudioFormat.g.cs @@ -0,0 +1,57 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + public enum RealtimeResponseCreateParamsOutputAudioFormat + { + /// + /// + /// + Pcm16, + /// + /// + /// + G711Ulaw, + /// + /// + /// + G711Alaw, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeResponseCreateParamsOutputAudioFormatExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeResponseCreateParamsOutputAudioFormat value) + { + return value switch + { + RealtimeResponseCreateParamsOutputAudioFormat.Pcm16 => "pcm16", + RealtimeResponseCreateParamsOutputAudioFormat.G711Ulaw => "g711_ulaw", + RealtimeResponseCreateParamsOutputAudioFormat.G711Alaw => "g711_alaw", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeResponseCreateParamsOutputAudioFormat? 
ToEnum(string value) + { + return value switch + { + "pcm16" => RealtimeResponseCreateParamsOutputAudioFormat.Pcm16, + "g711_ulaw" => RealtimeResponseCreateParamsOutputAudioFormat.G711Ulaw, + "g711_alaw" => RealtimeResponseCreateParamsOutputAudioFormat.G711Alaw, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsTool.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsTool.Json.g.cs new file mode 100644 index 00000000..48ea022e --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsTool.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class RealtimeResponseCreateParamsTool + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeResponseCreateParamsTool? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeResponseCreateParamsTool), + jsonSerializerContext) as global::OpenAI.RealtimeResponseCreateParamsTool; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeResponseCreateParamsTool? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.RealtimeResponseCreateParamsTool), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeResponseCreateParamsTool; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsTool.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsTool.g.cs new file mode 100644 index 00000000..51e9cbd1 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsTool.g.cs @@ -0,0 +1,81 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public sealed partial class RealtimeResponseCreateParamsTool + { + /// + /// The type of the tool, i.e. `function`. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.RealtimeResponseCreateParamsToolTypeJsonConverter))] + public global::OpenAI.RealtimeResponseCreateParamsToolType? Type { get; set; } + + /// + /// The name of the function. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("name")] + public string? Name { get; set; } + + /// + /// The description of the function, including guidance on when and how
+ /// to call it, and guidance about what to tell the user when calling
+ /// (if anything). + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("description")] + public string? Description { get; set; } + + /// + /// Parameters of the function in JSON Schema. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("parameters")] + public object? Parameters { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of the tool, i.e. `function`. + /// + /// + /// The name of the function. + /// + /// + /// The description of the function, including guidance on when and how
+ /// to call it, and guidance about what to tell the user when calling
+ /// (if anything). + /// + /// + /// Parameters of the function in JSON Schema. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeResponseCreateParamsTool( + global::OpenAI.RealtimeResponseCreateParamsToolType? type, + string? name, + string? description, + object? parameters) + { + this.Type = type; + this.Name = name; + this.Description = description; + this.Parameters = parameters; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeResponseCreateParamsTool() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsToolParameters.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsToolParameters.Json.g.cs new file mode 100644 index 00000000..b447de42 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsToolParameters.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class RealtimeResponseCreateParamsToolParameters + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeResponseCreateParamsToolParameters? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeResponseCreateParamsToolParameters), + jsonSerializerContext) as global::OpenAI.RealtimeResponseCreateParamsToolParameters; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeResponseCreateParamsToolParameters? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.RealtimeResponseCreateParamsToolParameters), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeResponseCreateParamsToolParameters; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsToolParameters.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsToolParameters.g.cs new file mode 100644 index 00000000..8aaec17d --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsToolParameters.g.cs @@ -0,0 +1,18 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Parameters of the function in JSON Schema. + /// + public sealed partial class RealtimeResponseCreateParamsToolParameters + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsToolType.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsToolType.g.cs new file mode 100644 index 00000000..526f3f2d --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsToolType.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The type of the tool, i.e. `function`. + /// + public enum RealtimeResponseCreateParamsToolType + { + /// + /// + /// + Function, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeResponseCreateParamsToolTypeExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this RealtimeResponseCreateParamsToolType value) + { + return value switch + { + RealtimeResponseCreateParamsToolType.Function => "function", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeResponseCreateParamsToolType? ToEnum(string value) + { + return value switch + { + "function" => RealtimeResponseCreateParamsToolType.Function, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsVoice.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsVoice.g.cs new file mode 100644 index 00000000..251129e2 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeResponseCreateParamsVoice.g.cs @@ -0,0 +1,90 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The voice the model uses to respond. Voice cannot be changed during the
+ /// session once the model has responded with audio at least once. Current
+ /// voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`,
+ /// `shimmer` and `verse`. + ///
+ public enum RealtimeResponseCreateParamsVoice + { + /// + /// + /// + Alloy, + /// + /// + /// + Ash, + /// + /// + /// + Ballad, + /// + /// + /// + Coral, + /// + /// + /// + Echo, + /// + /// + /// + Sage, + /// + /// + /// + Shimmer, + /// + /// + /// + Verse, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeResponseCreateParamsVoiceExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeResponseCreateParamsVoice value) + { + return value switch + { + RealtimeResponseCreateParamsVoice.Alloy => "alloy", + RealtimeResponseCreateParamsVoice.Ash => "ash", + RealtimeResponseCreateParamsVoice.Ballad => "ballad", + RealtimeResponseCreateParamsVoice.Coral => "coral", + RealtimeResponseCreateParamsVoice.Echo => "echo", + RealtimeResponseCreateParamsVoice.Sage => "sage", + RealtimeResponseCreateParamsVoice.Shimmer => "shimmer", + RealtimeResponseCreateParamsVoice.Verse => "verse", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeResponseCreateParamsVoice? 
ToEnum(string value) + { + return value switch + { + "alloy" => RealtimeResponseCreateParamsVoice.Alloy, + "ash" => RealtimeResponseCreateParamsVoice.Ash, + "ballad" => RealtimeResponseCreateParamsVoice.Ballad, + "coral" => RealtimeResponseCreateParamsVoice.Coral, + "echo" => RealtimeResponseCreateParamsVoice.Echo, + "sage" => RealtimeResponseCreateParamsVoice.Sage, + "shimmer" => RealtimeResponseCreateParamsVoice.Shimmer, + "verse" => RealtimeResponseCreateParamsVoice.Verse, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventErrorError.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventErrorError.g.cs index 44f6d6f6..67da8af2 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventErrorError.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventErrorError.g.cs @@ -12,7 +12,8 @@ public sealed partial class RealtimeServerEventErrorError /// The type of error (e.g., "invalid_request_error", "server_error"). ///
[global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } /// /// Error code, if any. @@ -24,7 +25,8 @@ public sealed partial class RealtimeServerEventErrorError /// A human-readable error message. /// [global::System.Text.Json.Serialization.JsonPropertyName("message")] - public string? Message { get; set; } + [global::System.Text.Json.Serialization.JsonRequired] + public required string Message { get; set; } /// /// Parameter related to the error, if any. @@ -64,15 +66,15 @@ public sealed partial class RealtimeServerEventErrorError /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public RealtimeServerEventErrorError( - string? type, + string type, + string message, string? code, - string? message, string? param, string? eventId) { - this.Type = type; + this.Type = type ?? throw new global::System.ArgumentNullException(nameof(type)); + this.Message = message ?? throw new global::System.ArgumentNullException(nameof(message)); this.Code = code; - this.Message = message; this.Param = param; this.EventId = eventId; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.cs index 103951c5..155ea203 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.cs @@ -12,7 +12,8 @@ public sealed partial class RealtimeServerEventRateLimitsUpdatedRateLimit /// The name of the rate limit (`requests`, `tokens`). /// [global::System.Text.Json.Serialization.JsonPropertyName("name")] - public string? 
Name { get; set; } + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.RealtimeServerEventRateLimitsUpdatedRateLimitNameJsonConverter))] + public global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimitName? Name { get; set; } /// /// The maximum allowed value for the rate limit. @@ -55,7 +56,7 @@ public sealed partial class RealtimeServerEventRateLimitsUpdatedRateLimit /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public RealtimeServerEventRateLimitsUpdatedRateLimit( - string? name, + global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimitName? name, int? limit, int? remaining, double? resetSeconds) diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdatedRateLimitName.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdatedRateLimitName.g.cs new file mode 100644 index 00000000..00a1eafa --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdatedRateLimitName.g.cs @@ -0,0 +1,51 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The name of the rate limit (`requests`, `tokens`). + /// + public enum RealtimeServerEventRateLimitsUpdatedRateLimitName + { + /// + /// + /// + Requests, + /// + /// + /// + Tokens, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventRateLimitsUpdatedRateLimitNameExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventRateLimitsUpdatedRateLimitName value) + { + return value switch + { + RealtimeServerEventRateLimitsUpdatedRateLimitName.Requests => "requests", + RealtimeServerEventRateLimitsUpdatedRateLimitName.Tokens => "tokens", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. 
+ /// + public static RealtimeServerEventRateLimitsUpdatedRateLimitName? ToEnum(string value) + { + return value switch + { + "requests" => RealtimeServerEventRateLimitsUpdatedRateLimitName.Requests, + "tokens" => RealtimeServerEventRateLimitsUpdatedRateLimitName.Tokens, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartDonePart.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartDonePart.g.cs index 1bdb7ab2..76ad3527 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartDonePart.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartDonePart.g.cs @@ -12,7 +12,8 @@ public sealed partial class RealtimeServerEventResponseContentPartDonePart /// The content type ("text", "audio"). /// [global::System.Text.Json.Serialization.JsonPropertyName("type")] - public string? Type { get; set; } + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.RealtimeServerEventResponseContentPartDonePartTypeJsonConverter))] + public global::OpenAI.RealtimeServerEventResponseContentPartDonePartType? Type { get; set; } /// /// The text content (if type is "text"). @@ -55,7 +56,7 @@ public sealed partial class RealtimeServerEventResponseContentPartDonePart /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public RealtimeServerEventResponseContentPartDonePart( - string? type, + global::OpenAI.RealtimeServerEventResponseContentPartDonePartType? type, string? text, string? audio, string? 
transcript) diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartDonePartType.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartDonePartType.g.cs new file mode 100644 index 00000000..b57fccb1 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartDonePartType.g.cs @@ -0,0 +1,51 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The content type ("text", "audio"). + /// + public enum RealtimeServerEventResponseContentPartDonePartType + { + /// + /// + /// + Audio, + /// + /// + /// + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseContentPartDonePartTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseContentPartDonePartType value) + { + return value switch + { + RealtimeServerEventResponseContentPartDonePartType.Audio => "audio", + RealtimeServerEventResponseContentPartDonePartType.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseContentPartDonePartType? 
ToEnum(string value) + { + return value switch + { + "audio" => RealtimeServerEventResponseContentPartDonePartType.Audio, + "text" => RealtimeServerEventResponseContentPartDonePartType.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequest.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequest.Json.g.cs new file mode 100644 index 00000000..a9323e64 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequest.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class RealtimeSessionCreateRequest + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeSessionCreateRequest? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeSessionCreateRequest), + jsonSerializerContext) as global::OpenAI.RealtimeSessionCreateRequest; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeSessionCreateRequest? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.RealtimeSessionCreateRequest), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeSessionCreateRequest; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequest.g.cs new file mode 100644 index 00000000..b121e449 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequest.g.cs @@ -0,0 +1,220 @@ + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace OpenAI +{ + /// + /// Realtime session object configuration. 
+ /// + public sealed partial class RealtimeSessionCreateRequest + { + /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] + public global::System.Collections.Generic.IList? Modalities { get; set; } + + /// + /// The Realtime model used for this session. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("model")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestModelJsonConverter))] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeSessionCreateRequestModel Model { get; set; } + + /// + /// The default system instructions (i.e. system message) prepended to model
+ /// calls. This field allows the client to guide the model on desired
+ /// responses. The model can be instructed on response content and format,
+ /// (e.g. "be extremely succinct", "act friendly", "here are examples of good
+ /// responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ /// into your voice", "laugh frequently"). The instructions are not guaranteed
+ /// to be followed by the model, but they provide guidance to the model on the
+ /// desired behavior.
+ /// Note that the server sets default instructions which will be used if this
+ /// field is not set and are visible in the `session.created` event at the
+ /// start of the session. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("instructions")] + public string? Instructions { get; set; } + + /// + /// The voice the model uses to respond. Voice cannot be changed during the
+ /// session once the model has responded with audio at least once. Current
+ /// voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`,
+ /// `shimmer` and `verse`. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("voice")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestVoiceJsonConverter))] + public global::OpenAI.RealtimeSessionCreateRequestVoice? Voice { get; set; } + + /// + /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_format")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestInputAudioFormatJsonConverter))] + public global::OpenAI.RealtimeSessionCreateRequestInputAudioFormat? InputAudioFormat { get; set; } + + /// + /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_audio_format")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestOutputAudioFormatJsonConverter))] + public global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormat? OutputAudioFormat { get; set; } + + /// + /// Configuration for input audio transcription, defaults to off and can be
+ /// set to `null` to turn off once on. Input audio transcription is not native
+ /// to the model, since the model consumes audio directly. Transcription runs
+ /// asynchronously through Whisper and should be treated as rough guidance
+ /// rather than the representation understood by the model. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_transcription")] + public global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription? InputAudioTranscription { get; set; } + + /// + /// Configuration for turn detection. Can be set to `null` to turn off. Server
+ /// VAD means that the model will detect the start and end of speech based on
+ /// audio volume and respond at the end of user speech. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("turn_detection")] + public global::OpenAI.RealtimeSessionCreateRequestTurnDetection? TurnDetection { get; set; } + + /// + /// Tools (functions) available to the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("tools")] + public global::System.Collections.Generic.IList? Tools { get; set; } + + /// + /// How the model chooses tools. Options are `auto`, `none`, `required`, or
+ /// specify a function. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("tool_choice")] + public string? ToolChoice { get; set; } + + /// + /// Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("temperature")] + public double? Temperature { get; set; } + + /// + /// Maximum number of output tokens for a single assistant response,
+ /// inclusive of tool calls. Provide an integer between 1 and 4096 to
+ /// limit output tokens, or `inf` for the maximum available tokens for a
+ /// given model. Defaults to `inf`. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("max_response_output_tokens")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + public global::OpenAI.OneOf? MaxResponseOutputTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + /// + /// + /// The Realtime model used for this session. + /// + /// + /// The default system instructions (i.e. system message) prepended to model
+ /// calls. This field allows the client to guide the model on desired
+ /// responses. The model can be instructed on response content and format,
+ /// (e.g. "be extremely succinct", "act friendly", "here are examples of good
+ /// responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ /// into your voice", "laugh frequently"). The instructions are not guaranteed
+ /// to be followed by the model, but they provide guidance to the model on the
+ /// desired behavior.
+ /// Note that the server sets default instructions which will be used if this
+ /// field is not set and are visible in the `session.created` event at the
+ /// start of the session. + /// + /// + /// The voice the model uses to respond. Voice cannot be changed during the
+ /// session once the model has responded with audio at least once. Current
+ /// voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`,
+ /// `shimmer` and `verse`. + /// + /// + /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + /// + /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + /// + /// Configuration for input audio transcription, defaults to off and can be
+ /// set to `null` to turn off once on. Input audio transcription is not native
+ /// to the model, since the model consumes audio directly. Transcription runs
+ /// asynchronously through Whisper and should be treated as rough guidance
+ /// rather than the representation understood by the model. + /// + /// + /// Configuration for turn detection. Can be set to `null` to turn off. Server
+ /// VAD means that the model will detect the start and end of speech based on
+ /// audio volume and respond at the end of user speech. + /// + /// + /// Tools (functions) available to the model. + /// + /// + /// How the model chooses tools. Options are `auto`, `none`, `required`, or
+ /// specify a function. + /// + /// + /// Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + /// + /// + /// Maximum number of output tokens for a single assistant response,
+ /// inclusive of tool calls. Provide an integer between 1 and 4096 to
+ /// limit output tokens, or `inf` for the maximum available tokens for a
+ /// given model. Defaults to `inf`. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeSessionCreateRequest( + global::OpenAI.RealtimeSessionCreateRequestModel model, + global::System.Collections.Generic.IList? modalities, + string? instructions, + global::OpenAI.RealtimeSessionCreateRequestVoice? voice, + global::OpenAI.RealtimeSessionCreateRequestInputAudioFormat? inputAudioFormat, + global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormat? outputAudioFormat, + global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription? inputAudioTranscription, + global::OpenAI.RealtimeSessionCreateRequestTurnDetection? turnDetection, + global::System.Collections.Generic.IList? tools, + string? toolChoice, + double? temperature, + global::OpenAI.OneOf? maxResponseOutputTokens) + { + this.Model = model; + this.Modalities = modalities; + this.Instructions = instructions; + this.Voice = voice; + this.InputAudioFormat = inputAudioFormat; + this.OutputAudioFormat = outputAudioFormat; + this.InputAudioTranscription = inputAudioTranscription; + this.TurnDetection = turnDetection; + this.Tools = tools; + this.ToolChoice = toolChoice; + this.Temperature = temperature; + this.MaxResponseOutputTokens = maxResponseOutputTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionCreateRequest() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequestInputAudioFormat.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequestInputAudioFormat.g.cs new file mode 100644 index 00000000..b4ba6ce8 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequestInputAudioFormat.g.cs @@ -0,0 +1,57 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. 
#nullable enable

namespace OpenAI
{
    /// <summary>
    /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
    /// </summary>
    public enum RealtimeSessionCreateRequestInputAudioFormat
    {
        /// <summary>Wire value: "pcm16".</summary>
        Pcm16,
        /// <summary>Wire value: "g711_ulaw".</summary>
        G711Ulaw,
        /// <summary>Wire value: "g711_alaw".</summary>
        G711Alaw,
    }

    /// <summary>
    /// Fast, reflection-free conversions between
    /// <see cref="RealtimeSessionCreateRequestInputAudioFormat" /> and its wire strings.
    /// </summary>
    public static class RealtimeSessionCreateRequestInputAudioFormatExtensions
    {
        /// <summary>Converts the enum value to its wire string.</summary>
        public static string ToValueString(this RealtimeSessionCreateRequestInputAudioFormat value)
        {
            switch (value)
            {
                case RealtimeSessionCreateRequestInputAudioFormat.Pcm16:
                    return "pcm16";
                case RealtimeSessionCreateRequestInputAudioFormat.G711Ulaw:
                    return "g711_ulaw";
                case RealtimeSessionCreateRequestInputAudioFormat.G711Alaw:
                    return "g711_alaw";
                default:
                    throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null);
            }
        }

        /// <summary>Converts a wire string to the enum value, or null when unrecognized.</summary>
        public static RealtimeSessionCreateRequestInputAudioFormat? ToEnum(string value)
        {
            return value switch
            {
                "pcm16" => RealtimeSessionCreateRequestInputAudioFormat.Pcm16,
                "g711_ulaw" => RealtimeSessionCreateRequestInputAudioFormat.G711Ulaw,
                "g711_alaw" => RealtimeSessionCreateRequestInputAudioFormat.G711Alaw,
                _ => null,
            };
        }
    }
}
#nullable enable

namespace OpenAI
{
    public sealed partial class RealtimeSessionCreateRequestInputAudioTranscription
    {
        /// <summary>
        /// Serializes the current instance to a JSON string using the provided JsonSerializerContext.
        /// </summary>
        public string ToJson(
            global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext)
        {
            // Runtime type is passed so the source-generated context resolves the right type info.
            return global::System.Text.Json.JsonSerializer.Serialize(
                this,
                this.GetType(),
                jsonSerializerContext);
        }

        /// <summary>
        /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions.
        /// </summary>
#if NET8_0_OR_GREATER
        [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
#endif
        public string ToJson(
            global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null)
        {
            return global::System.Text.Json.JsonSerializer.Serialize(this, jsonSerializerOptions);
        }

        /// <summary>
        /// Deserializes a JSON string using the provided JsonSerializerContext.
        /// </summary>
        public static global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription? FromJson(
            string json,
            global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext)
        {
            return global::System.Text.Json.JsonSerializer.Deserialize(
                json,
                typeof(global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription),
                jsonSerializerContext) as global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription;
        }

        /// <summary>
        /// Deserializes a JSON string using the provided JsonSerializerOptions.
        /// </summary>
#if NET8_0_OR_GREATER
        [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
#endif
        public static global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription? FromJson(
            string json,
            global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null)
        {
            return global::System.Text.Json.JsonSerializer.Deserialize<global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription>(json, jsonSerializerOptions);
        }

        /// <summary>
        /// Deserializes a JSON stream using the provided JsonSerializerContext.
        /// </summary>
        public static async global::System.Threading.Tasks.ValueTask<global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription?> FromJsonStreamAsync(
            global::System.IO.Stream jsonStream,
            global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext)
        {
            return (await global::System.Text.Json.JsonSerializer.DeserializeAsync(
                jsonStream,
                typeof(global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription),
                jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription;
        }

        /// <summary>
        /// Deserializes a JSON stream using the provided JsonSerializerOptions.
        /// </summary>
#if NET8_0_OR_GREATER
        [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
#endif
        public static global::System.Threading.Tasks.ValueTask<global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription?> FromJsonStreamAsync(
            global::System.IO.Stream jsonStream,
            global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null)
        {
            return global::System.Text.Json.JsonSerializer.DeserializeAsync<global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription>(jsonStream, jsonSerializerOptions);
        }
    }
}
#nullable enable

namespace OpenAI
{
    /// <summary>
    /// Configuration for input audio transcription; defaults to off and can be
    /// set to `null` to turn off once on. Input audio transcription is not native
    /// to the model, since the model consumes audio directly. Transcription runs
    /// asynchronously through Whisper and should be treated as rough guidance
    /// rather than the representation understood by the model.
    /// </summary>
    public sealed partial class RealtimeSessionCreateRequestInputAudioTranscription
    {
        /// <summary>
        /// The model to use for transcription; `whisper-1` is the only currently
        /// supported model.
        /// </summary>
        [global::System.Text.Json.Serialization.JsonPropertyName("model")]
        public string? Model { get; set; }

        /// <summary>
        /// Additional properties that are not explicitly defined in the schema.
        /// </summary>
        [global::System.Text.Json.Serialization.JsonExtensionData]
        public global::System.Collections.Generic.IDictionary<string, object> AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary<string, object>();

        /// <summary>
        /// Initializes a new instance of the <see cref="RealtimeSessionCreateRequestInputAudioTranscription" /> class.
        /// </summary>
        /// <param name="model">
        /// The model to use for transcription; `whisper-1` is the only currently
        /// supported model.
        /// </param>
        [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers]
        public RealtimeSessionCreateRequestInputAudioTranscription(
            string? model)
        {
            this.Model = model;
        }

        /// <summary>
        /// Initializes a new instance of the <see cref="RealtimeSessionCreateRequestInputAudioTranscription" /> class
        /// with all properties left unset.
        /// </summary>
        public RealtimeSessionCreateRequestInputAudioTranscription()
        {
        }
    }

    /// <summary>
    /// Special value for the maximum-output-tokens setting; serialized as "inf".
    /// </summary>
    public enum RealtimeSessionCreateRequestMaxResponseOutputTokens
    {
        /// <summary>Wire value: "inf".</summary>
        Inf,
    }

    /// <summary>
    /// Fast, reflection-free conversions between
    /// <see cref="RealtimeSessionCreateRequestMaxResponseOutputTokens" /> and its wire strings.
    /// </summary>
    public static class RealtimeSessionCreateRequestMaxResponseOutputTokensExtensions
    {
        /// <summary>Converts the enum value to its wire string.</summary>
        public static string ToValueString(this RealtimeSessionCreateRequestMaxResponseOutputTokens value)
        {
            return value switch
            {
                RealtimeSessionCreateRequestMaxResponseOutputTokens.Inf => "inf",
                _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null),
            };
        }

        /// <summary>Converts a wire string to the enum value, or null when unrecognized.</summary>
        public static RealtimeSessionCreateRequestMaxResponseOutputTokens? ToEnum(string value)
        {
            return value == "inf"
                ? RealtimeSessionCreateRequestMaxResponseOutputTokens.Inf
                : (RealtimeSessionCreateRequestMaxResponseOutputTokens?)null;
        }
    }
}
#nullable enable

namespace OpenAI
{
    /// <summary>
    /// A single modality the model can respond with; serialized as "text" or "audio".
    /// </summary>
    public enum RealtimeSessionCreateRequestModalitie
    {
        /// <summary>Wire value: "text".</summary>
        Text,
        /// <summary>Wire value: "audio".</summary>
        Audio,
    }

    /// <summary>
    /// Fast, reflection-free conversions between
    /// <see cref="RealtimeSessionCreateRequestModalitie" /> and its wire strings.
    /// </summary>
    public static class RealtimeSessionCreateRequestModalitieExtensions
    {
        /// <summary>Converts the enum value to its wire string.</summary>
        public static string ToValueString(this RealtimeSessionCreateRequestModalitie value)
        {
            switch (value)
            {
                case RealtimeSessionCreateRequestModalitie.Text:
                    return "text";
                case RealtimeSessionCreateRequestModalitie.Audio:
                    return "audio";
                default:
                    throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null);
            }
        }

        /// <summary>Converts a wire string to the enum value, or null when unrecognized.</summary>
        public static RealtimeSessionCreateRequestModalitie? ToEnum(string value)
        {
            return value switch
            {
                "text" => RealtimeSessionCreateRequestModalitie.Text,
                "audio" => RealtimeSessionCreateRequestModalitie.Audio,
                _ => null,
            };
        }
    }
}
#nullable enable

namespace OpenAI
{
    public sealed partial class RealtimeSessionCreateRequestModalities
    {
        /// <summary>
        /// Serializes the current instance to a JSON string using the provided JsonSerializerContext.
        /// </summary>
        public string ToJson(
            global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext)
        {
            return global::System.Text.Json.JsonSerializer.Serialize(
                this,
                this.GetType(),
                jsonSerializerContext);
        }

        /// <summary>
        /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions.
        /// </summary>
#if NET8_0_OR_GREATER
        [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
#endif
        public string ToJson(
            global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null)
        {
            return global::System.Text.Json.JsonSerializer.Serialize(this, jsonSerializerOptions);
        }

        /// <summary>
        /// Deserializes a JSON string using the provided JsonSerializerContext.
        /// </summary>
        public static global::OpenAI.RealtimeSessionCreateRequestModalities? FromJson(
            string json,
            global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext)
        {
            return global::System.Text.Json.JsonSerializer.Deserialize(
                json,
                typeof(global::OpenAI.RealtimeSessionCreateRequestModalities),
                jsonSerializerContext) as global::OpenAI.RealtimeSessionCreateRequestModalities;
        }

        /// <summary>
        /// Deserializes a JSON string using the provided JsonSerializerOptions.
        /// </summary>
#if NET8_0_OR_GREATER
        [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
#endif
        public static global::OpenAI.RealtimeSessionCreateRequestModalities? FromJson(
            string json,
            global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null)
        {
            return global::System.Text.Json.JsonSerializer.Deserialize<global::OpenAI.RealtimeSessionCreateRequestModalities>(json, jsonSerializerOptions);
        }

        /// <summary>
        /// Deserializes a JSON stream using the provided JsonSerializerContext.
        /// </summary>
        public static async global::System.Threading.Tasks.ValueTask<global::OpenAI.RealtimeSessionCreateRequestModalities?> FromJsonStreamAsync(
            global::System.IO.Stream jsonStream,
            global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext)
        {
            return (await global::System.Text.Json.JsonSerializer.DeserializeAsync(
                jsonStream,
                typeof(global::OpenAI.RealtimeSessionCreateRequestModalities),
                jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeSessionCreateRequestModalities;
        }

        /// <summary>
        /// Deserializes a JSON stream using the provided JsonSerializerOptions.
        /// </summary>
#if NET8_0_OR_GREATER
        [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
#endif
        public static global::System.Threading.Tasks.ValueTask<global::OpenAI.RealtimeSessionCreateRequestModalities?> FromJsonStreamAsync(
            global::System.IO.Stream jsonStream,
            global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null)
        {
            return global::System.Text.Json.JsonSerializer.DeserializeAsync<global::OpenAI.RealtimeSessionCreateRequestModalities>(jsonStream, jsonSerializerOptions);
        }
    }
}
#nullable enable

namespace OpenAI
{
    /// <summary>
    /// The set of modalities the model can respond with. To disable audio,
    /// set this to ["text"].
    /// </summary>
    public sealed partial class RealtimeSessionCreateRequestModalities
    {
        /// <summary>
        /// Additional properties that are not explicitly defined in the schema.
        /// </summary>
        [global::System.Text.Json.Serialization.JsonExtensionData]
        public global::System.Collections.Generic.IDictionary<string, object> AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary<string, object>();
    }

    /// <summary>
    /// The Realtime model used for this session.
    /// </summary>
    public enum RealtimeSessionCreateRequestModel
    {
        /// <summary>Wire value: "gpt-4o-realtime-preview".</summary>
        Gpt4oRealtimePreview,
        /// <summary>Wire value: "gpt-4o-realtime-preview-2024-10-01".</summary>
        Gpt4oRealtimePreview20241001,
        /// <summary>Wire value: "gpt-4o-realtime-preview-2024-12-17".</summary>
        Gpt4oRealtimePreview20241217,
        /// <summary>Wire value: "gpt-4o-mini-realtime-preview".</summary>
        Gpt4oMiniRealtimePreview,
        /// <summary>Wire value: "gpt-4o-mini-realtime-preview-2024-12-17".</summary>
        Gpt4oMiniRealtimePreview20241217,
    }

    /// <summary>
    /// Fast, reflection-free conversions between
    /// <see cref="RealtimeSessionCreateRequestModel" /> and its wire strings.
    /// </summary>
    public static class RealtimeSessionCreateRequestModelExtensions
    {
        /// <summary>Converts the enum value to its wire string.</summary>
        public static string ToValueString(this RealtimeSessionCreateRequestModel value)
        {
            return value switch
            {
                RealtimeSessionCreateRequestModel.Gpt4oRealtimePreview => "gpt-4o-realtime-preview",
                RealtimeSessionCreateRequestModel.Gpt4oRealtimePreview20241001 => "gpt-4o-realtime-preview-2024-10-01",
                RealtimeSessionCreateRequestModel.Gpt4oRealtimePreview20241217 => "gpt-4o-realtime-preview-2024-12-17",
                RealtimeSessionCreateRequestModel.Gpt4oMiniRealtimePreview => "gpt-4o-mini-realtime-preview",
                RealtimeSessionCreateRequestModel.Gpt4oMiniRealtimePreview20241217 => "gpt-4o-mini-realtime-preview-2024-12-17",
                _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null),
            };
        }

        /// <summary>Converts a wire string to the enum value, or null when unrecognized.</summary>
        public static RealtimeSessionCreateRequestModel? ToEnum(string value)
        {
            return value switch
            {
                "gpt-4o-realtime-preview" => RealtimeSessionCreateRequestModel.Gpt4oRealtimePreview,
                "gpt-4o-realtime-preview-2024-10-01" => RealtimeSessionCreateRequestModel.Gpt4oRealtimePreview20241001,
                "gpt-4o-realtime-preview-2024-12-17" => RealtimeSessionCreateRequestModel.Gpt4oRealtimePreview20241217,
                "gpt-4o-mini-realtime-preview" => RealtimeSessionCreateRequestModel.Gpt4oMiniRealtimePreview,
                "gpt-4o-mini-realtime-preview-2024-12-17" => RealtimeSessionCreateRequestModel.Gpt4oMiniRealtimePreview20241217,
                _ => null,
            };
        }
    }
}
#nullable enable

namespace OpenAI
{
    /// <summary>
    /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
    /// </summary>
    public enum RealtimeSessionCreateRequestOutputAudioFormat
    {
        /// <summary>Wire value: "pcm16".</summary>
        Pcm16,
        /// <summary>Wire value: "g711_ulaw".</summary>
        G711Ulaw,
        /// <summary>Wire value: "g711_alaw".</summary>
        G711Alaw,
    }

    /// <summary>
    /// Fast, reflection-free conversions between
    /// <see cref="RealtimeSessionCreateRequestOutputAudioFormat" /> and its wire strings.
    /// </summary>
    public static class RealtimeSessionCreateRequestOutputAudioFormatExtensions
    {
        /// <summary>Converts the enum value to its wire string.</summary>
        public static string ToValueString(this RealtimeSessionCreateRequestOutputAudioFormat value)
        {
            switch (value)
            {
                case RealtimeSessionCreateRequestOutputAudioFormat.Pcm16:
                    return "pcm16";
                case RealtimeSessionCreateRequestOutputAudioFormat.G711Ulaw:
                    return "g711_ulaw";
                case RealtimeSessionCreateRequestOutputAudioFormat.G711Alaw:
                    return "g711_alaw";
                default:
                    throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null);
            }
        }

        /// <summary>Converts a wire string to the enum value, or null when unrecognized.</summary>
        public static RealtimeSessionCreateRequestOutputAudioFormat? ToEnum(string value)
        {
            return value switch
            {
                "pcm16" => RealtimeSessionCreateRequestOutputAudioFormat.Pcm16,
                "g711_ulaw" => RealtimeSessionCreateRequestOutputAudioFormat.G711Ulaw,
                "g711_alaw" => RealtimeSessionCreateRequestOutputAudioFormat.G711Alaw,
                _ => null,
            };
        }
    }
}
#nullable enable

namespace OpenAI
{
    public sealed partial class RealtimeSessionCreateRequestTool
    {
        /// <summary>
        /// Serializes the current instance to a JSON string using the provided JsonSerializerContext.
        /// </summary>
        public string ToJson(
            global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext)
        {
            return global::System.Text.Json.JsonSerializer.Serialize(
                this,
                this.GetType(),
                jsonSerializerContext);
        }

        /// <summary>
        /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions.
        /// </summary>
#if NET8_0_OR_GREATER
        [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
#endif
        public string ToJson(
            global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null)
        {
            return global::System.Text.Json.JsonSerializer.Serialize(this, jsonSerializerOptions);
        }

        /// <summary>
        /// Deserializes a JSON string using the provided JsonSerializerContext.
        /// </summary>
        public static global::OpenAI.RealtimeSessionCreateRequestTool? FromJson(
            string json,
            global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext)
        {
            return global::System.Text.Json.JsonSerializer.Deserialize(
                json,
                typeof(global::OpenAI.RealtimeSessionCreateRequestTool),
                jsonSerializerContext) as global::OpenAI.RealtimeSessionCreateRequestTool;
        }

        /// <summary>
        /// Deserializes a JSON string using the provided JsonSerializerOptions.
        /// </summary>
#if NET8_0_OR_GREATER
        [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
#endif
        public static global::OpenAI.RealtimeSessionCreateRequestTool? FromJson(
            string json,
            global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null)
        {
            return global::System.Text.Json.JsonSerializer.Deserialize<global::OpenAI.RealtimeSessionCreateRequestTool>(json, jsonSerializerOptions);
        }

        /// <summary>
        /// Deserializes a JSON stream using the provided JsonSerializerContext.
        /// </summary>
        public static async global::System.Threading.Tasks.ValueTask<global::OpenAI.RealtimeSessionCreateRequestTool?> FromJsonStreamAsync(
            global::System.IO.Stream jsonStream,
            global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext)
        {
            return (await global::System.Text.Json.JsonSerializer.DeserializeAsync(
                jsonStream,
                typeof(global::OpenAI.RealtimeSessionCreateRequestTool),
                jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeSessionCreateRequestTool;
        }

        /// <summary>
        /// Deserializes a JSON stream using the provided JsonSerializerOptions.
        /// </summary>
#if NET8_0_OR_GREATER
        [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
#endif
        public static global::System.Threading.Tasks.ValueTask<global::OpenAI.RealtimeSessionCreateRequestTool?> FromJsonStreamAsync(
            global::System.IO.Stream jsonStream,
            global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null)
        {
            return global::System.Text.Json.JsonSerializer.DeserializeAsync<global::OpenAI.RealtimeSessionCreateRequestTool>(jsonStream, jsonSerializerOptions);
        }
    }
}
#nullable enable

namespace OpenAI
{
    /// <summary>
    /// A tool (function) available to the model: its type, name, description
    /// and JSON Schema parameters.
    /// </summary>
    public sealed partial class RealtimeSessionCreateRequestTool
    {
        /// <summary>
        /// The type of the tool, i.e. `function`.
        /// </summary>
        [global::System.Text.Json.Serialization.JsonPropertyName("type")]
        [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateRequestToolTypeJsonConverter))]
        public global::OpenAI.RealtimeSessionCreateRequestToolType? Type { get; set; }

        /// <summary>
        /// The name of the function.
        /// </summary>
        [global::System.Text.Json.Serialization.JsonPropertyName("name")]
        public string? Name { get; set; }

        /// <summary>
        /// The description of the function, including guidance on when and how
        /// to call it, and guidance about what to tell the user when calling
        /// (if anything).
        /// </summary>
        [global::System.Text.Json.Serialization.JsonPropertyName("description")]
        public string? Description { get; set; }

        /// <summary>
        /// Parameters of the function in JSON Schema.
        /// </summary>
        [global::System.Text.Json.Serialization.JsonPropertyName("parameters")]
        public object? Parameters { get; set; }

        /// <summary>
        /// Additional properties that are not explicitly defined in the schema.
        /// </summary>
        [global::System.Text.Json.Serialization.JsonExtensionData]
        public global::System.Collections.Generic.IDictionary<string, object> AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary<string, object>();

        /// <summary>
        /// Initializes a new instance of the <see cref="RealtimeSessionCreateRequestTool" /> class.
        /// </summary>
        /// <param name="type">The type of the tool, i.e. `function`.</param>
        /// <param name="name">The name of the function.</param>
        /// <param name="description">
        /// The description of the function, including guidance on when and how
        /// to call it, and guidance about what to tell the user when calling
        /// (if anything).
        /// </param>
        /// <param name="parameters">Parameters of the function in JSON Schema.</param>
        [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers]
        public RealtimeSessionCreateRequestTool(
            global::OpenAI.RealtimeSessionCreateRequestToolType? type,
            string? name,
            string? description,
            object? parameters)
        {
            this.Type = type;
            this.Name = name;
            this.Description = description;
            this.Parameters = parameters;
        }

        /// <summary>
        /// Initializes a new instance of the <see cref="RealtimeSessionCreateRequestTool" /> class
        /// with all properties left unset.
        /// </summary>
        public RealtimeSessionCreateRequestTool()
        {
        }
    }
}
#nullable enable

namespace OpenAI
{
    /// <summary>
    /// Parameters of the function in JSON Schema.
    /// </summary>
    public sealed partial class RealtimeSessionCreateRequestToolParameters
    {
        /// <summary>
        /// Additional properties that are not explicitly defined in the schema.
        /// </summary>
        [global::System.Text.Json.Serialization.JsonExtensionData]
        public global::System.Collections.Generic.IDictionary<string, object> AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary<string, object>();
    }

    public sealed partial class RealtimeSessionCreateRequestToolParameters
    {
        /// <summary>
        /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions.
        /// </summary>
#if NET8_0_OR_GREATER
        [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
#endif
        public string ToJson(
            global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null)
        {
            return global::System.Text.Json.JsonSerializer.Serialize(this, jsonSerializerOptions);
        }

        /// <summary>
        /// Deserializes a JSON string using the provided JsonSerializerContext.
        /// </summary>
        public static global::OpenAI.RealtimeSessionCreateRequestToolParameters? FromJson(
            string json,
            global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext)
        {
            return global::System.Text.Json.JsonSerializer.Deserialize(
                json,
                typeof(global::OpenAI.RealtimeSessionCreateRequestToolParameters),
                jsonSerializerContext) as global::OpenAI.RealtimeSessionCreateRequestToolParameters;
        }

        /// <summary>
        /// Deserializes a JSON string using the provided JsonSerializerOptions.
        /// </summary>
#if NET8_0_OR_GREATER
        [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
#endif
        public static global::OpenAI.RealtimeSessionCreateRequestToolParameters? FromJson(
            string json,
            global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null)
        {
            return global::System.Text.Json.JsonSerializer.Deserialize<global::OpenAI.RealtimeSessionCreateRequestToolParameters>(json, jsonSerializerOptions);
        }

        /// <summary>
        /// Deserializes a JSON stream using the provided JsonSerializerContext.
        /// </summary>
        public static async global::System.Threading.Tasks.ValueTask<global::OpenAI.RealtimeSessionCreateRequestToolParameters?> FromJsonStreamAsync(
            global::System.IO.Stream jsonStream,
            global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext)
        {
            return (await global::System.Text.Json.JsonSerializer.DeserializeAsync(
                jsonStream,
                typeof(global::OpenAI.RealtimeSessionCreateRequestToolParameters),
                jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeSessionCreateRequestToolParameters;
        }

        /// <summary>
        /// Deserializes a JSON stream using the provided JsonSerializerOptions.
        /// </summary>
#if NET8_0_OR_GREATER
        [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
#endif
        public static global::System.Threading.Tasks.ValueTask<global::OpenAI.RealtimeSessionCreateRequestToolParameters?> FromJsonStreamAsync(
            global::System.IO.Stream jsonStream,
            global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null)
        {
            return global::System.Text.Json.JsonSerializer.DeserializeAsync<global::OpenAI.RealtimeSessionCreateRequestToolParameters>(jsonStream, jsonSerializerOptions);
        }
    }
}
+ /// + public static string ToValueString(this RealtimeSessionCreateRequestToolType value) + { + return value switch + { + RealtimeSessionCreateRequestToolType.Function => "function", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeSessionCreateRequestToolType? ToEnum(string value) + { + return value switch + { + "function" => RealtimeSessionCreateRequestToolType.Function, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequestTurnDetection.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequestTurnDetection.Json.g.cs new file mode 100644 index 00000000..da9a7ab7 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequestTurnDetection.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class RealtimeSessionCreateRequestTurnDetection + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeSessionCreateRequestTurnDetection? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeSessionCreateRequestTurnDetection), + jsonSerializerContext) as global::OpenAI.RealtimeSessionCreateRequestTurnDetection; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeSessionCreateRequestTurnDetection? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.RealtimeSessionCreateRequestTurnDetection), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeSessionCreateRequestTurnDetection; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequestTurnDetection.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequestTurnDetection.g.cs new file mode 100644 index 00000000..5d6cb293 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequestTurnDetection.g.cs @@ -0,0 +1,103 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Configuration for turn detection. Can be set to `null` to turn off. Server
+ /// VAD means that the model will detect the start and end of speech based on
+ /// audio volume and respond at the end of user speech. + ///
+ public sealed partial class RealtimeSessionCreateRequestTurnDetection + { + /// + /// Type of turn detection, only `server_vad` is currently supported. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A
+ /// higher threshold will require louder audio to activate the model, and
+ /// thus might perform better in noisy environments. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("threshold")] + public double? Threshold { get; set; } + + /// + /// Amount of audio to include before the VAD detected speech (in
+ /// milliseconds). Defaults to 300ms. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("prefix_padding_ms")] + public int? PrefixPaddingMs { get; set; } + + /// + /// Duration of silence to detect speech stop (in milliseconds). Defaults
+ /// to 500ms. With shorter values the model will respond more quickly,
+ /// but may jump in on short pauses from the user. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("silence_duration_ms")] + public int? SilenceDurationMs { get; set; } + + /// + /// Whether or not to automatically generate a response when VAD is
+ /// enabled. `true` by default.
+ /// Default Value: true + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("create_response")] + public bool? CreateResponse { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Type of turn detection, only `server_vad` is currently supported. + /// + /// + /// Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A
+ /// higher threshold will require louder audio to activate the model, and
+ /// thus might perform better in noisy environments. + /// + /// + /// Amount of audio to include before the VAD detected speech (in
+ /// milliseconds). Defaults to 300ms. + /// + /// + /// Duration of silence to detect speech stop (in milliseconds). Defaults
+ /// to 500ms. With shorter values the model will respond more quickly,
+ /// but may jump in on short pauses from the user. + /// + /// + /// Whether or not to automatically generate a response when VAD is
+ /// enabled. `true` by default.
+ /// Default Value: true + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeSessionCreateRequestTurnDetection( + string? type, + double? threshold, + int? prefixPaddingMs, + int? silenceDurationMs, + bool? createResponse) + { + this.Type = type; + this.Threshold = threshold; + this.PrefixPaddingMs = prefixPaddingMs; + this.SilenceDurationMs = silenceDurationMs; + this.CreateResponse = createResponse; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionCreateRequestTurnDetection() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequestVoice.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequestVoice.g.cs new file mode 100644 index 00000000..847c1150 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateRequestVoice.g.cs @@ -0,0 +1,90 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The voice the model uses to respond. Voice cannot be changed during the
+ /// session once the model has responded with audio at least once. Current
+ /// voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`,
+ /// `shimmer` and `verse`. + ///
+ public enum RealtimeSessionCreateRequestVoice + { + /// + /// + /// + Alloy, + /// + /// + /// + Ash, + /// + /// + /// + Ballad, + /// + /// + /// + Coral, + /// + /// + /// + Echo, + /// + /// + /// + Sage, + /// + /// + /// + Shimmer, + /// + /// + /// + Verse, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeSessionCreateRequestVoiceExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeSessionCreateRequestVoice value) + { + return value switch + { + RealtimeSessionCreateRequestVoice.Alloy => "alloy", + RealtimeSessionCreateRequestVoice.Ash => "ash", + RealtimeSessionCreateRequestVoice.Ballad => "ballad", + RealtimeSessionCreateRequestVoice.Coral => "coral", + RealtimeSessionCreateRequestVoice.Echo => "echo", + RealtimeSessionCreateRequestVoice.Sage => "sage", + RealtimeSessionCreateRequestVoice.Shimmer => "shimmer", + RealtimeSessionCreateRequestVoice.Verse => "verse", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeSessionCreateRequestVoice? 
ToEnum(string value) + { + return value switch + { + "alloy" => RealtimeSessionCreateRequestVoice.Alloy, + "ash" => RealtimeSessionCreateRequestVoice.Ash, + "ballad" => RealtimeSessionCreateRequestVoice.Ballad, + "coral" => RealtimeSessionCreateRequestVoice.Coral, + "echo" => RealtimeSessionCreateRequestVoice.Echo, + "sage" => RealtimeSessionCreateRequestVoice.Sage, + "shimmer" => RealtimeSessionCreateRequestVoice.Shimmer, + "verse" => RealtimeSessionCreateRequestVoice.Verse, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponse.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponse.Json.g.cs new file mode 100644 index 00000000..d05f8288 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponse.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class RealtimeSessionCreateResponse + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeSessionCreateResponse? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeSessionCreateResponse), + jsonSerializerContext) as global::OpenAI.RealtimeSessionCreateResponse; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeSessionCreateResponse? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.RealtimeSessionCreateResponse), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeSessionCreateResponse; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponse.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponse.g.cs new file mode 100644 index 00000000..3c7cfb05 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponse.g.cs @@ -0,0 +1,217 @@ + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace OpenAI +{ + /// + /// A new Realtime session configuration, with an ephermeral key. Default TTL
+ /// for keys is one minute. + ///
+ public sealed partial class RealtimeSessionCreateResponse + { + /// + /// Ephemeral key returned by the API. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("client_secret")] + public global::OpenAI.RealtimeSessionCreateResponseClientSecret? ClientSecret { get; set; } + + /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] + public global::System.Collections.Generic.IList? Modalities { get; set; } + + /// + /// The default system instructions (i.e. system message) prepended to model
+ /// calls. This field allows the client to guide the model on desired
+ /// responses. The model can be instructed on response content and format,
+ /// (e.g. "be extremely succinct", "act friendly", "here are examples of good
+ /// responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ /// into your voice", "laugh frequently"). The instructions are not guaranteed
+ /// to be followed by the model, but they provide guidance to the model on the
+ /// desired behavior.
+ /// Note that the server sets default instructions which will be used if this
+ /// field is not set and are visible in the `session.created` event at the
+ /// start of the session. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("instructions")] + public string? Instructions { get; set; } + + /// + /// The voice the model uses to respond. Voice cannot be changed during the
+ /// session once the model has responded with audio at least once. Current
+ /// voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`,
+ /// `shimmer` and `verse`. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("voice")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateResponseVoiceJsonConverter))] + public global::OpenAI.RealtimeSessionCreateResponseVoice? Voice { get; set; } + + /// + /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_format")] + public string? InputAudioFormat { get; set; } + + /// + /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_audio_format")] + public string? OutputAudioFormat { get; set; } + + /// + /// Configuration for input audio transcription, defaults to off and can be
+ /// set to `null` to turn off once on. Input audio transcription is not native
+ /// to the model, since the model consumes audio directly. Transcription runs
+ /// asynchronously through Whisper and should be treated as rough guidance
+ /// rather than the representation understood by the model. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_transcription")] + public global::OpenAI.RealtimeSessionCreateResponseInputAudioTranscription? InputAudioTranscription { get; set; } + + /// + /// Configuration for turn detection. Can be set to `null` to turn off. Server
+ /// VAD means that the model will detect the start and end of speech based on
+ /// audio volume and respond at the end of user speech. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("turn_detection")] + public global::OpenAI.RealtimeSessionCreateResponseTurnDetection? TurnDetection { get; set; } + + /// + /// Tools (functions) available to the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("tools")] + public global::System.Collections.Generic.IList? Tools { get; set; } + + /// + /// How the model chooses tools. Options are `auto`, `none`, `required`, or
+ /// specify a function. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("tool_choice")] + public string? ToolChoice { get; set; } + + /// + /// Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("temperature")] + public double? Temperature { get; set; } + + /// + /// Maximum number of output tokens for a single assistant response,
+ /// inclusive of tool calls. Provide an integer between 1 and 4096 to
+ /// limit output tokens, or `inf` for the maximum available tokens for a
+ /// given model. Defaults to `inf`. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("max_response_output_tokens")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverter))] + public global::OpenAI.OneOf? MaxResponseOutputTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Ephemeral key returned by the API. + /// + /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + /// + /// + /// The default system instructions (i.e. system message) prepended to model
+ /// calls. This field allows the client to guide the model on desired
+ /// responses. The model can be instructed on response content and format,
+ /// (e.g. "be extremely succinct", "act friendly", "here are examples of good
+ /// responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ /// into your voice", "laugh frequently"). The instructions are not guaranteed
+ /// to be followed by the model, but they provide guidance to the model on the
+ /// desired behavior.
+ /// Note that the server sets default instructions which will be used if this
+ /// field is not set and are visible in the `session.created` event at the
+ /// start of the session. + /// + /// + /// The voice the model uses to respond. Voice cannot be changed during the
+ /// session once the model has responded with audio at least once. Current
+ /// voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`,
+ /// `shimmer` and `verse`. + /// + /// + /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + /// + /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + /// + /// Configuration for input audio transcription, defaults to off and can be
+ /// set to `null` to turn off once on. Input audio transcription is not native
+ /// to the model, since the model consumes audio directly. Transcription runs
+ /// asynchronously through Whisper and should be treated as rough guidance
+ /// rather than the representation understood by the model. + /// + /// + /// Configuration for turn detection. Can be set to `null` to turn off. Server
+ /// VAD means that the model will detect the start and end of speech based on
+ /// audio volume and respond at the end of user speech. + /// + /// + /// Tools (functions) available to the model. + /// + /// + /// How the model chooses tools. Options are `auto`, `none`, `required`, or
+ /// specify a function. + /// + /// + /// Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + /// + /// + /// Maximum number of output tokens for a single assistant response,
+ /// inclusive of tool calls. Provide an integer between 1 and 4096 to
+ /// limit output tokens, or `inf` for the maximum available tokens for a
+ /// given model. Defaults to `inf`. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeSessionCreateResponse( + global::OpenAI.RealtimeSessionCreateResponseClientSecret? clientSecret, + global::System.Collections.Generic.IList? modalities, + string? instructions, + global::OpenAI.RealtimeSessionCreateResponseVoice? voice, + string? inputAudioFormat, + string? outputAudioFormat, + global::OpenAI.RealtimeSessionCreateResponseInputAudioTranscription? inputAudioTranscription, + global::OpenAI.RealtimeSessionCreateResponseTurnDetection? turnDetection, + global::System.Collections.Generic.IList? tools, + string? toolChoice, + double? temperature, + global::OpenAI.OneOf? maxResponseOutputTokens) + { + this.ClientSecret = clientSecret; + this.Modalities = modalities; + this.Instructions = instructions; + this.Voice = voice; + this.InputAudioFormat = inputAudioFormat; + this.OutputAudioFormat = outputAudioFormat; + this.InputAudioTranscription = inputAudioTranscription; + this.TurnDetection = turnDetection; + this.Tools = tools; + this.ToolChoice = toolChoice; + this.Temperature = temperature; + this.MaxResponseOutputTokens = maxResponseOutputTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionCreateResponse() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseClientSecret.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseClientSecret.Json.g.cs new file mode 100644 index 00000000..25516e8a --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseClientSecret.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class RealtimeSessionCreateResponseClientSecret + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeSessionCreateResponseClientSecret? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeSessionCreateResponseClientSecret), + jsonSerializerContext) as global::OpenAI.RealtimeSessionCreateResponseClientSecret; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeSessionCreateResponseClientSecret? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.RealtimeSessionCreateResponseClientSecret), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeSessionCreateResponseClientSecret; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseClientSecret.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseClientSecret.g.cs new file mode 100644 index 00000000..0c5bbf4b --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseClientSecret.g.cs @@ -0,0 +1,60 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Ephemeral key returned by the API. + /// + public sealed partial class RealtimeSessionCreateResponseClientSecret + { + /// + /// Ephemeral key usable in client environments to authenticate connections
+ /// to the Realtime API. Use this in client-side environments rather than
+ /// a standard API token, which should only be used server-side. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("value")] + public string? Value { get; set; } + + /// + /// Timestamp for when the token expires. Currently, all tokens expire
+ /// after one minute. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("expires_at")] + public int? ExpiresAt { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Ephemeral key usable in client environments to authenticate connections
+ /// to the Realtime API. Use this in client-side environments rather than
+ /// a standard API token, which should only be used server-side. + /// + /// + /// Timestamp for when the token expires. Currently, all tokens expire
+ /// after one minute. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeSessionCreateResponseClientSecret( + string? value, + int? expiresAt) + { + this.Value = value; + this.ExpiresAt = expiresAt; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionCreateResponseClientSecret() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseInputAudioTranscription.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseInputAudioTranscription.Json.g.cs new file mode 100644 index 00000000..ae578e0a --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseInputAudioTranscription.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class RealtimeSessionCreateResponseInputAudioTranscription + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeSessionCreateResponseInputAudioTranscription? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeSessionCreateResponseInputAudioTranscription), + jsonSerializerContext) as global::OpenAI.RealtimeSessionCreateResponseInputAudioTranscription; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeSessionCreateResponseInputAudioTranscription? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.RealtimeSessionCreateResponseInputAudioTranscription), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeSessionCreateResponseInputAudioTranscription; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseInputAudioTranscription.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseInputAudioTranscription.g.cs new file mode 100644 index 00000000..1af8e2e8 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseInputAudioTranscription.g.cs @@ -0,0 +1,49 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Configuration for input audio transcription, defaults to off and can be
+ /// set to `null` to turn off once on. Input audio transcription is not native
+ /// to the model, since the model consumes audio directly. Transcription runs
+ /// asynchronously through Whisper and should be treated as rough guidance
+ /// rather than the representation understood by the model. + ///
+ public sealed partial class RealtimeSessionCreateResponseInputAudioTranscription + { + /// + /// The model to use for transcription, `whisper-1` is the only currently
+ /// supported model. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The model to use for transcription, `whisper-1` is the only currently
+ /// supported model. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeSessionCreateResponseInputAudioTranscription( + string? model) + { + this.Model = model; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionCreateResponseInputAudioTranscription() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseMaxResponseOutputTokens.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseMaxResponseOutputTokens.g.cs new file mode 100644 index 00000000..587690c7 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseMaxResponseOutputTokens.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum RealtimeSessionCreateResponseMaxResponseOutputTokens + { + /// + /// + /// + Inf, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeSessionCreateResponseMaxResponseOutputTokensExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeSessionCreateResponseMaxResponseOutputTokens value) + { + return value switch + { + RealtimeSessionCreateResponseMaxResponseOutputTokens.Inf => "inf", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeSessionCreateResponseMaxResponseOutputTokens? 
ToEnum(string value) + { + return value switch + { + "inf" => RealtimeSessionCreateResponseMaxResponseOutputTokens.Inf, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseModalitie.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseModalitie.g.cs new file mode 100644 index 00000000..d5f9a16d --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseModalitie.g.cs @@ -0,0 +1,51 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum RealtimeSessionCreateResponseModalitie + { + /// + /// + /// + Text, + /// + /// + /// + Audio, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeSessionCreateResponseModalitieExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeSessionCreateResponseModalitie value) + { + return value switch + { + RealtimeSessionCreateResponseModalitie.Text => "text", + RealtimeSessionCreateResponseModalitie.Audio => "audio", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeSessionCreateResponseModalitie? 
ToEnum(string value) + { + return value switch + { + "text" => RealtimeSessionCreateResponseModalitie.Text, + "audio" => RealtimeSessionCreateResponseModalitie.Audio, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseModalities.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseModalities.Json.g.cs new file mode 100644 index 00000000..e3af92e2 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseModalities.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class RealtimeSessionCreateResponseModalities + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeSessionCreateResponseModalities? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeSessionCreateResponseModalities), + jsonSerializerContext) as global::OpenAI.RealtimeSessionCreateResponseModalities; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeSessionCreateResponseModalities? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.RealtimeSessionCreateResponseModalities), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeSessionCreateResponseModalities; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseModalities.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseModalities.g.cs new file mode 100644 index 00000000..02f9b562 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseModalities.g.cs @@ -0,0 +1,19 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + ///
+ public sealed partial class RealtimeSessionCreateResponseModalities + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseTool.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseTool.Json.g.cs new file mode 100644 index 00000000..6c843f94 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseTool.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class RealtimeSessionCreateResponseTool + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeSessionCreateResponseTool? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeSessionCreateResponseTool), + jsonSerializerContext) as global::OpenAI.RealtimeSessionCreateResponseTool; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeSessionCreateResponseTool? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.RealtimeSessionCreateResponseTool), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeSessionCreateResponseTool; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseTool.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseTool.g.cs new file mode 100644 index 00000000..21fa6ed6 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseTool.g.cs @@ -0,0 +1,81 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public sealed partial class RealtimeSessionCreateResponseTool + { + /// + /// The type of the tool, i.e. `function`. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.RealtimeSessionCreateResponseToolTypeJsonConverter))] + public global::OpenAI.RealtimeSessionCreateResponseToolType? Type { get; set; } + + /// + /// The name of the function. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("name")] + public string? Name { get; set; } + + /// + /// The description of the function, including guidance on when and how
+ /// to call it, and guidance about what to tell the user when calling
+ /// (if anything). + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("description")] + public string? Description { get; set; } + + /// + /// Parameters of the function in JSON Schema. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("parameters")] + public object? Parameters { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of the tool, i.e. `function`. + /// + /// + /// The name of the function. + /// + /// + /// The description of the function, including guidance on when and how
+ /// to call it, and guidance about what to tell the user when calling
+ /// (if anything). + /// + /// + /// Parameters of the function in JSON Schema. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeSessionCreateResponseTool( + global::OpenAI.RealtimeSessionCreateResponseToolType? type, + string? name, + string? description, + object? parameters) + { + this.Type = type; + this.Name = name; + this.Description = description; + this.Parameters = parameters; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionCreateResponseTool() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseToolParameters.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseToolParameters.Json.g.cs new file mode 100644 index 00000000..d0dd2d96 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseToolParameters.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class RealtimeSessionCreateResponseToolParameters + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeSessionCreateResponseToolParameters? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeSessionCreateResponseToolParameters), + jsonSerializerContext) as global::OpenAI.RealtimeSessionCreateResponseToolParameters; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeSessionCreateResponseToolParameters? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.RealtimeSessionCreateResponseToolParameters), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeSessionCreateResponseToolParameters; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseToolParameters.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseToolParameters.g.cs new file mode 100644 index 00000000..64ab941c --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseToolParameters.g.cs @@ -0,0 +1,18 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Parameters of the function in JSON Schema. + /// + public sealed partial class RealtimeSessionCreateResponseToolParameters + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseToolType.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseToolType.g.cs new file mode 100644 index 00000000..4cab1620 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseToolType.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The type of the tool, i.e. `function`. + /// + public enum RealtimeSessionCreateResponseToolType + { + /// + /// + /// + Function, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeSessionCreateResponseToolTypeExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this RealtimeSessionCreateResponseToolType value) + { + return value switch + { + RealtimeSessionCreateResponseToolType.Function => "function", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeSessionCreateResponseToolType? ToEnum(string value) + { + return value switch + { + "function" => RealtimeSessionCreateResponseToolType.Function, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseTurnDetection.Json.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseTurnDetection.Json.g.cs new file mode 100644 index 00000000..62f7b4eb --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseTurnDetection.Json.g.cs @@ -0,0 +1,92 @@ +#nullable enable + +namespace OpenAI +{ + public sealed partial class RealtimeSessionCreateResponseTurnDetection + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeSessionCreateResponseTurnDetection? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeSessionCreateResponseTurnDetection), + jsonSerializerContext) as global::OpenAI.RealtimeSessionCreateResponseTurnDetection; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeSessionCreateResponseTurnDetection? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::OpenAI.RealtimeSessionCreateResponseTurnDetection), + jsonSerializerContext).ConfigureAwait(false)) as global::OpenAI.RealtimeSessionCreateResponseTurnDetection; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseTurnDetection.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseTurnDetection.g.cs new file mode 100644 index 00000000..e6525774 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseTurnDetection.g.cs @@ -0,0 +1,88 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Configuration for turn detection. Can be set to `null` to turn off. Server
+ /// VAD means that the model will detect the start and end of speech based on
+ /// audio volume and respond at the end of user speech. + ///
+ public sealed partial class RealtimeSessionCreateResponseTurnDetection + { + /// + /// Type of turn detection, only `server_vad` is currently supported. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A
+ /// higher threshold will require louder audio to activate the model, and
+ /// thus might perform better in noisy environments. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("threshold")] + public double? Threshold { get; set; } + + /// + /// Amount of audio to include before the VAD detected speech (in
+ /// milliseconds). Defaults to 300ms. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("prefix_padding_ms")] + public int? PrefixPaddingMs { get; set; } + + /// + /// Duration of silence to detect speech stop (in milliseconds). Defaults
+ /// to 500ms. With shorter values the model will respond more quickly,
+ /// but may jump in on short pauses from the user. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("silence_duration_ms")] + public int? SilenceDurationMs { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Type of turn detection, only `server_vad` is currently supported. + /// + /// + /// Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A
+ /// higher threshold will require louder audio to activate the model, and
+ /// thus might perform better in noisy environments. + /// + /// + /// Amount of audio to include before the VAD detected speech (in
+ /// milliseconds). Defaults to 300ms. + /// + /// + /// Duration of silence to detect speech stop (in milliseconds). Defaults
+ /// to 500ms. With shorter values the model will respond more quickly,
+ /// but may jump in on short pauses from the user. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeSessionCreateResponseTurnDetection( + string? type, + double? threshold, + int? prefixPaddingMs, + int? silenceDurationMs) + { + this.Type = type; + this.Threshold = threshold; + this.PrefixPaddingMs = prefixPaddingMs; + this.SilenceDurationMs = silenceDurationMs; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionCreateResponseTurnDetection() + { + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseVoice.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseVoice.g.cs new file mode 100644 index 00000000..e247b832 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeSessionCreateResponseVoice.g.cs @@ -0,0 +1,90 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The voice the model uses to respond. Voice cannot be changed during the
+ /// session once the model has responded with audio at least once. Current
+ /// voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`,
+ /// `shimmer` and `verse`. + ///
+ public enum RealtimeSessionCreateResponseVoice + { + /// + /// + /// + Alloy, + /// + /// + /// + Ash, + /// + /// + /// + Ballad, + /// + /// + /// + Coral, + /// + /// + /// + Echo, + /// + /// + /// + Sage, + /// + /// + /// + Shimmer, + /// + /// + /// + Verse, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeSessionCreateResponseVoiceExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeSessionCreateResponseVoice value) + { + return value switch + { + RealtimeSessionCreateResponseVoice.Alloy => "alloy", + RealtimeSessionCreateResponseVoice.Ash => "ash", + RealtimeSessionCreateResponseVoice.Ballad => "ballad", + RealtimeSessionCreateResponseVoice.Coral => "coral", + RealtimeSessionCreateResponseVoice.Echo => "echo", + RealtimeSessionCreateResponseVoice.Sage => "sage", + RealtimeSessionCreateResponseVoice.Shimmer => "shimmer", + RealtimeSessionCreateResponseVoice.Verse => "verse", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeSessionCreateResponseVoice? 
ToEnum(string value) + { + return value switch + { + "alloy" => RealtimeSessionCreateResponseVoice.Alloy, + "ash" => RealtimeSessionCreateResponseVoice.Ash, + "ballad" => RealtimeSessionCreateResponseVoice.Ballad, + "coral" => RealtimeSessionCreateResponseVoice.Coral, + "echo" => RealtimeSessionCreateResponseVoice.Echo, + "sage" => RealtimeSessionCreateResponseVoice.Sage, + "shimmer" => RealtimeSessionCreateResponseVoice.Shimmer, + "verse" => RealtimeSessionCreateResponseVoice.Verse, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ResponseFormat.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ResponseFormat.g.cs index eb3da959..99339470 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ResponseFormat.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ResponseFormat.g.cs @@ -5,10 +5,20 @@ namespace OpenAI { /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
- /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// An object specifying the format that the model must output.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables
+ /// Structured Outputs which ensures the model will match your supplied JSON
+ /// schema. Learn more in the [Structured Outputs
+ /// guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures
+ /// the message the model generates is valid JSON.
+ /// **Important:** when using JSON mode, you **must** also instruct the model
+ /// to produce JSON yourself via a system or user message. Without this, the
+ /// model may generate an unending stream of whitespace until the generation
+ /// reaches the token limit, resulting in a long-running and seemingly "stuck"
+ /// request. Also note that the message content may be partially cut off if
+ /// `finish_reason="length"`, which indicates the generation exceeded
+ /// `max_tokens` or the conversation exceeded the max context length. ///
public readonly partial struct ResponseFormat : global::System.IEquatable { diff --git a/src/libs/OpenAI/Generated/OpenAI.OpenAiApi.g.cs b/src/libs/OpenAI/Generated/OpenAI.OpenAiApi.g.cs index 6b7084c7..1eb88e47 100644 --- a/src/libs/OpenAI/Generated/OpenAI.OpenAiApi.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.OpenAiApi.g.cs @@ -190,6 +190,15 @@ public sealed partial class OpenAiApi : global::OpenAI.IOpenAiApi, global::Syste JsonSerializerContext = JsonSerializerContext, }; + /// + /// + /// + public RealtimeClient Realtime => new RealtimeClient(HttpClient, authorizations: Authorizations) + { + ReadResponseAsString = ReadResponseAsString, + JsonSerializerContext = JsonSerializerContext, + }; + /// /// /// diff --git a/src/libs/OpenAI/Generated/OpenAI.RealtimeClient.CreateRealtimeSession.g.cs b/src/libs/OpenAI/Generated/OpenAI.RealtimeClient.CreateRealtimeSession.g.cs new file mode 100644 index 00000000..39f0f1e0 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.RealtimeClient.CreateRealtimeSession.g.cs @@ -0,0 +1,271 @@ + +#nullable enable + +namespace OpenAI +{ + public partial class RealtimeClient + { + partial void PrepareCreateRealtimeSessionArguments( + global::System.Net.Http.HttpClient httpClient, + global::OpenAI.RealtimeSessionCreateRequest request); + partial void PrepareCreateRealtimeSessionRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + global::OpenAI.RealtimeSessionCreateRequest request); + partial void ProcessCreateRealtimeSessionResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessCreateRealtimeSessionResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Create an ephemeral API token for use in client-side applications with the
+ /// Realtime API. Can be configured with the same session parameters as the
+ /// `session.update` client event.
+ /// It responds with a session object, plus a `client_secret` key which contains
+ /// a usable ephemeral API token that can be used to authenticate browser clients
+ /// for the Realtime API. + ///
+ /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task CreateRealtimeSessionAsync( + global::OpenAI.RealtimeSessionCreateRequest request, + global::System.Threading.CancellationToken cancellationToken = default) + { + request = request ?? throw new global::System.ArgumentNullException(nameof(request)); + + PrepareArguments( + client: HttpClient); + PrepareCreateRealtimeSessionArguments( + httpClient: HttpClient, + request: request); + + var __pathBuilder = new PathBuilder( + path: "/realtime/sessions", + baseUri: HttpClient.BaseAddress); + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Post, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + var __httpRequestContentBody = request.ToJson(JsonSerializerContext); + var __httpRequestContent = new global::System.Net.Http.StringContent( + content: __httpRequestContentBody, + encoding: global::System.Text.Encoding.UTF8, + mediaType: "application/json"); + __httpRequest.Content = __httpRequestContent; + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareCreateRealtimeSessionRequest( + httpClient: HttpClient, + 
httpRequestMessage: __httpRequest, + request: request); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessCreateRealtimeSessionResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessCreateRealtimeSessionResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::OpenAI.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::OpenAI.RealtimeSessionCreateResponse.FromJson(__content, JsonSerializerContext) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::OpenAI.ApiException( + message: __response.ReasonPhrase ?? 
string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); + + return + await global::OpenAI.RealtimeSessionCreateResponse.FromJsonStreamAsync(__content, JsonSerializerContext).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + + /// + /// Create an ephemeral API token for use in client-side applications with the
+ /// Realtime API. Can be configured with the same session parameters as the
+ /// `session.update` client event.
+ /// It responds with a session object, plus a `client_secret` key which contains
+ /// a usable ephemeral API token that can be used to authenticate browser clients
+ /// for the Realtime API. + ///
+ /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + /// + /// + /// The Realtime model used for this session. + /// + /// + /// The default system instructions (i.e. system message) prepended to model
+ /// calls. This field allows the client to guide the model on desired
+ /// responses. The model can be instructed on response content and format,
+ /// (e.g. "be extremely succinct", "act friendly", "here are examples of good
+ /// responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ /// into your voice", "laugh frequently"). The instructions are not guaranteed
+ /// to be followed by the model, but they provide guidance to the model on the
+ /// desired behavior.
+ /// Note that the server sets default instructions which will be used if this
+ /// field is not set and are visible in the `session.created` event at the
+ /// start of the session. + /// + /// + /// The voice the model uses to respond. Voice cannot be changed during the
+ /// session once the model has responded with audio at least once. Current
+ /// voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`,
+ /// `shimmer` and `verse`. + /// + /// + /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + /// + /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + /// + /// Configuration for input audio transcription, defaults to off and can be
+ /// set to `null` to turn off once on. Input audio transcription is not native
+ /// to the model, since the model consumes audio directly. Transcription runs
+ /// asynchronously through Whisper and should be treated as rough guidance
+ /// rather than the representation understood by the model. + /// + /// + /// Configuration for turn detection. Can be set to `null` to turn off. Server
+ /// VAD means that the model will detect the start and end of speech based on
+ /// audio volume and respond at the end of user speech. + /// + /// + /// Tools (functions) available to the model. + /// + /// + /// How the model chooses tools. Options are `auto`, `none`, `required`, or
+ /// specify a function. + /// + /// + /// Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + /// + /// + /// Maximum number of output tokens for a single assistant response,
+ /// inclusive of tool calls. Provide an integer between 1 and 4096 to
+ /// limit output tokens, or `inf` for the maximum available tokens for a
+ /// given model. Defaults to `inf`. + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task CreateRealtimeSessionAsync( + global::OpenAI.RealtimeSessionCreateRequestModel model, + global::System.Collections.Generic.IList? modalities = default, + string? instructions = default, + global::OpenAI.RealtimeSessionCreateRequestVoice? voice = default, + global::OpenAI.RealtimeSessionCreateRequestInputAudioFormat? inputAudioFormat = default, + global::OpenAI.RealtimeSessionCreateRequestOutputAudioFormat? outputAudioFormat = default, + global::OpenAI.RealtimeSessionCreateRequestInputAudioTranscription? inputAudioTranscription = default, + global::OpenAI.RealtimeSessionCreateRequestTurnDetection? turnDetection = default, + global::System.Collections.Generic.IList? tools = default, + string? toolChoice = default, + double? temperature = default, + global::OpenAI.OneOf? maxResponseOutputTokens = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + var __request = new global::OpenAI.RealtimeSessionCreateRequest + { + Modalities = modalities, + Model = model, + Instructions = instructions, + Voice = voice, + InputAudioFormat = inputAudioFormat, + OutputAudioFormat = outputAudioFormat, + InputAudioTranscription = inputAudioTranscription, + TurnDetection = turnDetection, + Tools = tools, + ToolChoice = toolChoice, + Temperature = temperature, + MaxResponseOutputTokens = maxResponseOutputTokens, + }; + + return await CreateRealtimeSessionAsync( + request: __request, + cancellationToken: cancellationToken).ConfigureAwait(false); + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.RealtimeClient.g.cs b/src/libs/OpenAI/Generated/OpenAI.RealtimeClient.g.cs new file mode 100644 index 00000000..150ecf73 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.RealtimeClient.g.cs @@ -0,0 +1,86 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// If no httpClient is 
provided, a new one will be created.
+ /// If no baseUri is provided, the default baseUri from OpenAPI spec will be used. + ///
+ public sealed partial class RealtimeClient : global::OpenAI.IRealtimeClient, global::System.IDisposable + { + /// + /// + /// + public const string DefaultBaseUrl = "https://api.openai.com/v1"; + + private bool _disposeHttpClient = true; + + /// + public global::System.Net.Http.HttpClient HttpClient { get; } + + /// + public System.Uri? BaseUri => HttpClient.BaseAddress; + + /// + public global::System.Collections.Generic.List Authorizations { get; } + + /// + public bool ReadResponseAsString { get; set; } +#if DEBUG + = true; +#endif + /// + /// + /// + public global::System.Text.Json.Serialization.JsonSerializerContext JsonSerializerContext { get; set; } = global::OpenAI.SourceGenerationContext.Default; + + + /// + /// Creates a new instance of the RealtimeClient. + /// If no httpClient is provided, a new one will be created. + /// If no baseUri is provided, the default baseUri from OpenAPI spec will be used. + /// + /// The HttpClient instance. If not provided, a new one will be created. + /// The base URL for the API. If not provided, the default baseUri from OpenAPI spec will be used. + /// The authorizations to use for the requests. + /// Dispose the HttpClient when the instance is disposed. True by default. + public RealtimeClient( + global::System.Net.Http.HttpClient? httpClient = null, + global::System.Uri? baseUri = null, + global::System.Collections.Generic.List? authorizations = null, + bool disposeHttpClient = true) + { + HttpClient = httpClient ?? new global::System.Net.Http.HttpClient(); + HttpClient.BaseAddress ??= baseUri ?? new global::System.Uri(DefaultBaseUrl); + Authorizations = authorizations ?? 
new global::System.Collections.Generic.List(); + _disposeHttpClient = disposeHttpClient; + + Initialized(HttpClient); + } + + /// + public void Dispose() + { + if (_disposeHttpClient) + { + HttpClient.Dispose(); + } + } + + partial void Initialized( + global::System.Net.Http.HttpClient client); + partial void PrepareArguments( + global::System.Net.Http.HttpClient client); + partial void PrepareRequest( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpRequestMessage request); + partial void ProcessResponse( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response); + partial void ProcessResponseContent( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response, + ref string content); + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/openapi.yaml b/src/libs/OpenAI/openapi.yaml index f85285e9..232cbfc2 100644 --- a/src/libs/OpenAI/openapi.yaml +++ b/src/libs/OpenAI/openapi.yaml @@ -458,7 +458,7 @@ paths: post: tags: - Chat - summary: "Creates a model response for the given chat conversation. Learn more in the\n[text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),\nand [audio](/docs/guides/audio) guides.\n" + summary: "Creates a model response for the given chat conversation. Learn more in the\n[text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),\nand [audio](/docs/guides/audio) guides.\n\nParameter support can differ depending on the model used to generate the\nresponse, particularly for newer reasoning models. Parameters that are only\nsupported for reasoning models are noted below. 
For the current state of \nunsupported parameters in reasoning models, \n[refer to the reasoning guide](/docs/guides/reasoning).\n" operationId: createChatCompletion requestBody: content: @@ -481,9 +481,9 @@ paths: examples: - title: Default request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_chat_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_chat_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ]\n)\n\nprint(completion.choices[0].message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"system\", content: \"You are a helpful assistant.\" }],\n model: \"VAR_chat_model_id\",\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" + curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_chat_model_id\",\n \"messages\": [\n {\n \"role\": \"developer\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ]\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_chat_model_id\",\n messages=[\n {\"role\": \"developer\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ]\n)\n\nprint(completion.choices[0].message)\n" + node.js: "import OpenAI from 
\"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"developer\", content: \"You are a helpful assistant.\" }],\n model: \"VAR_chat_model_id\",\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nHello there, how may I assist you today?\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n }\n}\n" - title: Image input request: @@ -493,9 +493,9 @@ paths: response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n }\n}\n" - title: Streaming request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_chat_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": 
\"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_chat_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream=True\n)\n\nfor chunk in completion:\n print(chunk.choices[0].delta)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n model: \"VAR_chat_model_id\",\n messages: [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream: true,\n });\n\n for await (const chunk of completion) {\n console.log(chunk.choices[0].delta.content);\n }\n}\n\nmain();" + curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_chat_model_id\",\n \"messages\": [\n {\n \"role\": \"developer\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"stream\": true\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_chat_model_id\",\n messages=[\n {\"role\": \"developer\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream=True\n)\n\nfor chunk in completion:\n print(chunk.choices[0].delta)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n model: \"VAR_chat_model_id\",\n messages: [\n {\"role\": \"developer\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream: true,\n });\n\n 
for await (const chunk of completion) {\n console.log(chunk.choices[0].delta.content);\n }\n}\n\nmain();" response: "{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"role\":\"assistant\",\"content\":\"\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n....\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{},\"logprobs\":null,\"finish_reason\":\"stop\"}]}\n" - title: Functions request: @@ -770,23 +770,27 @@ paths: curl: "curl https://api.openai.com/v1/fine_tuning/jobs \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"training_file\": \"file-BK7bzQj3FfZFXr7DbL6xJwfo\",\n \"model\": \"gpt-4o-mini\"\n }'\n" python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.create(\n training_file=\"file-abc123\",\n model=\"gpt-4o-mini\"\n)\n" node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.create({\n training_file: \"file-abc123\"\n });\n\n console.log(fineTune);\n}\n\nmain();\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n}\n" + response: "{\n \"object\": 
\"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"method\": {\n \"type\": \"supervised\",\n \"supervised\": {\n \"hyperparameters\": {\n \"batch_size\": \"auto\",\n \"learning_rate_multiplier\": \"auto\",\n \"n_epochs\": \"auto\",\n }\n }\n }\n}\n" - title: Epochs request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"training_file\": \"file-abc123\",\n \"model\": \"gpt-4o-mini\",\n \"hyperparameters\": {\n \"n_epochs\": 2\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.create(\n training_file=\"file-abc123\",\n model=\"gpt-4o-mini\",\n hyperparameters={\n \"n_epochs\":2\n }\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.create({\n training_file: \"file-abc123\",\n model: \"gpt-4o-mini\",\n hyperparameters: { n_epochs: 2 }\n });\n\n console.log(fineTune);\n}\n\nmain();\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\"n_epochs\": 2},\n}\n" + curl: "curl https://api.openai.com/v1/fine_tuning/jobs \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"training_file\": \"file-abc123\",\n \"model\": \"gpt-4o-mini\",\n \"method\": {\n \"type\": \"supervised\",\n \"supervised\": {\n 
\"hyperparameters\": {\n \"n_epochs\": 2\n }\n }\n }\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.create(\n training_file=\"file-abc123\",\n model=\"gpt-4o-mini\",\n method={\n \"type\": \"supervised\",\n \"supervised\": {\n \"hyperparameters\": {\n \"n_epochs\": 2\n }\n }\n }\n)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.create({\n training_file: \"file-abc123\",\n model: \"gpt-4o-mini\",\n method: {\n type: \"supervised\",\n supervised: {\n hyperparameters: {\n n_epochs: 2\n }\n }\n }\n });\n\n console.log(fineTune);\n}\n\nmain();\n" + response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\"n_epochs\": 2},\n \"method\": {\n \"type\": \"supervised\",\n \"supervised\": {\n \"hyperparameters\": {\n \"batch_size\": \"auto\",\n \"learning_rate_multiplier\": \"auto\",\n \"n_epochs\": 2,\n }\n }\n }\n}\n" - title: Validation file request: curl: "curl https://api.openai.com/v1/fine_tuning/jobs \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"training_file\": \"file-abc123\",\n \"validation_file\": \"file-abc123\",\n \"model\": \"gpt-4o-mini\"\n }'\n" python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.create(\n training_file=\"file-abc123\",\n validation_file=\"file-def456\",\n model=\"gpt-4o-mini\"\n)\n" node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.create({\n training_file: \"file-abc123\",\n validation_file: \"file-abc123\"\n });\n\n 
console.log(fineTune);\n}\n\nmain();\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": \"file-abc123\",\n \"training_file\": \"file-abc123\",\n}\n" + response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": \"file-abc123\",\n \"training_file\": \"file-abc123\",\n \"method\": {\n \"type\": \"supervised\",\n \"supervised\": {\n \"hyperparameters\": {\n \"batch_size\": \"auto\",\n \"learning_rate_multiplier\": \"auto\",\n \"n_epochs\": \"auto\",\n }\n }\n }\n}\n" + - title: DPO + request: + curl: "curl https://api.openai.com/v1/fine_tuning/jobs \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"training_file\": \"file-abc123\",\n \"validation_file\": \"file-abc123\",\n \"model\": \"gpt-4o-mini\",\n \"method\": {\n \"type\": \"dpo\",\n \"dpo\": {\n \"hyperparameters\": {\n \"beta\": 0.1,\n }\n }\n }\n }'\n" + response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": \"file-abc123\",\n \"training_file\": \"file-abc123\",\n \"method\": {\n \"type\": \"dpo\",\n \"dpo\": {\n \"hyperparameters\": {\n \"beta\": 0.1,\n \"batch_size\": \"auto\",\n \"learning_rate_multiplier\": \"auto\",\n \"n_epochs\": \"auto\",\n }\n }\n }\n}\n" - title: W&B Integration request: curl: "curl https://api.openai.com/v1/fine_tuning/jobs \\\n -H 
\"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"training_file\": \"file-abc123\",\n \"validation_file\": \"file-abc123\",\n \"model\": \"gpt-4o-mini\",\n \"integrations\": [\n {\n \"type\": \"wandb\",\n \"wandb\": {\n \"project\": \"my-wandb-project\",\n \"name\": \"ft-run-display-name\"\n \"tags\": [\n \"first-experiment\", \"v2\"\n ]\n }\n }\n ]\n }'\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": \"file-abc123\",\n \"training_file\": \"file-abc123\",\n \"integrations\": [\n {\n \"type\": \"wandb\",\n \"wandb\": {\n \"project\": \"my-wandb-project\",\n \"entity\": None,\n \"run_id\": \"ftjob-abc123\"\n }\n }\n ]\n}\n" + response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": \"file-abc123\",\n \"training_file\": \"file-abc123\",\n \"integrations\": [\n {\n \"type\": \"wandb\",\n \"wandb\": {\n \"project\": \"my-wandb-project\",\n \"entity\": None,\n \"run_id\": \"ftjob-abc123\"\n }\n }\n ],\n \"method\": {\n \"type\": \"supervised\",\n \"supervised\": {\n \"hyperparameters\": {\n \"batch_size\": \"auto\",\n \"learning_rate_multiplier\": \"auto\",\n \"n_epochs\": \"auto\",\n }\n }\n }\n}\n" get: tags: - Fine-tuning @@ -851,7 +855,7 @@ paths: curl: "curl https://api.openai.com/v1/fine_tuning/jobs/ft-AF1WoRqd3aJAHsqc9NY7iL8F \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.retrieve(\"ftjob-abc123\")\n" node.js: "import OpenAI from \"openai\";\n\nconst openai = new 
OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.retrieve(\"ftjob-abc123\");\n\n console.log(fineTune);\n}\n\nmain();\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"davinci-002\",\n \"created_at\": 1692661014,\n \"finished_at\": 1692661190,\n \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n \"organization_id\": \"org-123\",\n \"result_files\": [\n \"file-abc123\"\n ],\n \"status\": \"succeeded\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n },\n \"trained_tokens\": 5768,\n \"integrations\": [],\n \"seed\": 0,\n \"estimated_finish\": 0\n}\n" + response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"davinci-002\",\n \"created_at\": 1692661014,\n \"finished_at\": 1692661190,\n \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n \"organization_id\": \"org-123\",\n \"result_files\": [\n \"file-abc123\"\n ],\n \"status\": \"succeeded\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n },\n \"trained_tokens\": 5768,\n \"integrations\": [],\n \"seed\": 0,\n \"estimated_finish\": 0,\n \"method\": {\n \"type\": \"supervised\",\n \"supervised\": {\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n }\n }\n }\n}\n" '/fine_tuning/jobs/{fine_tuning_job_id}/cancel': post: tags: @@ -882,7 +886,7 @@ paths: curl: "curl -X POST https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.cancel(\"ftjob-abc123\")\n" node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync 
function main() {\n const fineTune = await openai.fineTuning.jobs.cancel(\"ftjob-abc123\");\n\n console.log(fineTune);\n}\nmain();" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"hyperparameters\": {\n \"n_epochs\": \"auto\"\n },\n \"status\": \"cancelled\",\n \"validation_file\": \"file-abc123\",\n \"training_file\": \"file-abc123\"\n}\n" + response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"cancelled\",\n \"validation_file\": \"file-abc123\",\n \"training_file\": \"file-abc123\"\n}\n" '/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints': get: tags: @@ -3052,6 +3056,34 @@ paths: curl: "curl -X DELETE https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" response: content: "{\n \"object\": \"organization.user.deleted\",\n \"id\": \"user_abc\",\n \"deleted\": true\n}\n" + /realtime/sessions: + post: + tags: + - Realtime + summary: "Create an ephemeral API token for use in client-side applications with the\nRealtime API. Can be configured with the same session parameters as the\n`session.update` client event.\n\nIt responds with a session object, plus a `client_secret` key which contains\na usable ephemeral API token that can be used to authenticate browser clients\nfor the Realtime API.\n" + operationId: create-realtime-session + requestBody: + description: Create an ephemeral API key with the given session configuration. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/RealtimeSessionCreateRequest' + required: true + responses: + '200': + description: Session created successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/RealtimeSessionCreateResponse' + x-oaiMeta: + name: Create session + group: realtime + returns: 'The created Realtime session object, plus an ephemeral key' + examples: + request: + curl: "curl -X POST https://api.openai.com/v1/realtime/sessions \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"model\": \"gpt-4o-realtime-preview-2024-12-17\",\n \"modalities\": [\"audio\", \"text\"],\n \"instructions\": \"You are a friendly assistant.\"\n }'\n" + response: "{\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-4o-realtime-preview-2024-12-17\",\n \"modalities\": [\"audio\", \"text\"],\n \"instructions\": \"You are a friendly assistant.\",\n \"voice\": \"alloy\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": null,\n \"tools\": [],\n \"tool_choice\": \"none\",\n \"temperature\": 0.7,\n \"max_response_output_tokens\": 200,\n \"client_secret\": {\n \"value\": \"ek_abc123\", \n \"expires_at\": 1234567890\n }\n}\n" /threads: post: tags: @@ -5489,11 +5521,40 @@ components: description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' 
nullable: true deprecated: true + description: "Messages sent by the model in response to user messages.\n" ChatCompletionRequestAssistantMessageContentPart: oneOf: - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartRefusal' x-oaiExpandable: true + ChatCompletionRequestDeveloperMessage: + title: Developer message + required: + - content + - role + type: object + properties: + content: + oneOf: + - title: Text content + type: string + description: The contents of the developer message. + - title: Array of content parts + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + description: 'An array of content parts with a defined type. For developer messages, only type `text` is supported.' + description: The contents of the developer message. + role: + enum: + - developer + type: string + description: 'The role of the messages author, in this case `developer`.' + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + description: "Developer-provided instructions that the model should follow, regardless of\nmessages sent by the user. With o1 models and newer, `developer` messages\nreplace the previous `system` messages.\n" ChatCompletionRequestFunctionMessage: title: Function message required: @@ -5517,6 +5578,7 @@ components: deprecated: true ChatCompletionRequestMessage: oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestDeveloperMessage' - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' @@ -5639,6 +5701,7 @@ components: name: type: string description: An optional name for the participant. 
Provides the model information to differentiate between participants of the same role. + description: "Developer-provided instructions that the model should follow, regardless of\nmessages sent by the user. With o1 models and newer, use `developer` messages\nfor this purpose instead.\n" ChatCompletionRequestSystemMessageContentPart: oneOf: - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' @@ -5703,6 +5766,7 @@ components: name: type: string description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + description: "Messages sent by an end user, containing prompts or additional context\ninformation.\n" ChatCompletionRequestUserMessageContentPart: oneOf: - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' @@ -6237,6 +6301,8 @@ components: anyOf: - type: string - enum: + - o1 + - o1-2024-12-17 - o1-preview - o1-preview-2024-09-12 - o1-mini @@ -6245,10 +6311,11 @@ components: - gpt-4o-2024-11-20 - gpt-4o-2024-08-06 - gpt-4o-2024-05-13 - - gpt-4o-realtime-preview - - gpt-4o-realtime-preview-2024-10-01 - gpt-4o-audio-preview - gpt-4o-audio-preview-2024-10-01 + - gpt-4o-audio-preview-2024-12-17 + - gpt-4o-mini-audio-preview + - gpt-4o-mini-audio-preview-2024-12-17 - chatgpt-4o-latest - gpt-4o-mini - gpt-4o-mini-2024-07-18 @@ -6277,9 +6344,17 @@ components: x-oaiTypeLabel: string store: type: boolean - description: "Whether or not to store the output of this chat completion request\nfor use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.\n" + description: "Whether or not to store the output of this chat completion request for \nuse in our [model distillation](/docs/guides/distillation) or\n[evals](/docs/guides/evals) products.\n" default: false nullable: true + reasoning_effort: + enum: + - low + - medium + - high + type: string + description: "**o1 models only** \n\nConstrains effort on reasoning for 
\n[reasoning models](https://platform.openai.com/docs/guides/reasoning).\nCurrently supported values are `low`, `medium`, and `high`. Reducing\nreasoning effort can result in faster responses and fewer tokens used\non reasoning in a response.\n" + default: medium metadata: type: object additionalProperties: @@ -6290,31 +6365,31 @@ components: maximum: 2 minimum: -2 type: number - description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n" + description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on\ntheir existing frequency in the text so far, decreasing the model's\nlikelihood to repeat the same line verbatim.\n" default: 0 nullable: true logit_bias: type: object additionalProperties: type: integer - description: "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n" + description: "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the\ntokenizer) to an associated bias value from -100 to 100. 
Mathematically,\nthe bias is added to the logits generated by the model prior to sampling.\nThe exact effect will vary per model, but values between -1 and 1 should\ndecrease or increase likelihood of selection; values like -100 or 100\nshould result in a ban or exclusive selection of the relevant token.\n" default: nullable: true x-oaiTypeLabel: map logprobs: type: boolean - description: 'Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.' + description: "Whether to return log probabilities of the output tokens or not. If true,\nreturns the log probabilities of each output token returned in the\n`content` of `message`.\n" default: false nullable: true top_logprobs: maximum: 20 minimum: 0 type: integer - description: 'An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.' + description: "An integer between 0 and 20 specifying the number of most likely tokens to\nreturn at each token position, each with an associated log probability.\n`logprobs` must be set to `true` if this parameter is used.\n" nullable: true max_tokens: type: integer - description: "The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API.\n\nThis value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o1 series models](/docs/guides/reasoning).\n" + description: "The maximum number of [tokens](/tokenizer) that can be generated in the\nchat completion. 
This value can be used to control\n[costs](https://openai.com/api/pricing/) for text generated via API.\n\nThis value is now deprecated in favor of `max_completion_tokens`, and is\nnot compatible with [o1 series models](/docs/guides/reasoning).\n" nullable: true deprecated: true max_completion_tokens: @@ -6371,7 +6446,7 @@ components: maximum: 2 minimum: -2 type: number - description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n" + description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on\nwhether they appear in the text so far, increasing the model's likelihood\nto talk about new topics.\n" default: 0 nullable: true response_format: @@ -6379,7 +6454,7 @@ components: - $ref: '#/components/schemas/ResponseFormatText' - $ref: '#/components/schemas/ResponseFormatJsonObject' - $ref: '#/components/schemas/ResponseFormatJsonSchema' - description: "An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. 
Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" + description: "An object specifying the format that the model must output.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables\nStructured Outputs which ensures the model will match your supplied JSON\nschema. Learn more in the [Structured Outputs\nguide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures\nthe message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model\nto produce JSON yourself via a system or user message. Without this, the\nmodel may generate an unending stream of whitespace until the generation\nreaches the token limit, resulting in a long-running and seemingly \"stuck\"\nrequest. Also note that the message content may be partially cut off if\n`finish_reason=\"length\"`, which indicates the generation exceeded\n`max_tokens` or the conversation exceeded the max context length.\n" x-oaiExpandable: true seed: maximum: 9223372036854776000 @@ -6419,7 +6494,7 @@ components: maximum: 2 minimum: 0 type: number - description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n" + description: "What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nWe generally recommend altering this or `top_p` but not both.\n" default: 1 nullable: true example: 1 @@ -6427,7 +6502,7 @@ components: maximum: 1 minimum: 0 type: number - description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n" + description: "An alternative to sampling with temperature, called nucleus sampling,\nwhere the model considers the results of the tokens with top_p probability\nmass. So 0.1 means only the tokens comprising the top 10% probability mass\nare considered.\n\nWe generally recommend altering this or `temperature` but not both.\n" default: 1 nullable: true example: 1 @@ -6452,7 +6527,7 @@ components: type: string description: "`none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function.\n" - $ref: '#/components/schemas/ChatCompletionFunctionCallOption' - description: "Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n`none` means the model will not call a function and instead generates a message.\n`auto` means the model can pick between generating a message or calling a function.\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n\n`none` is the default when no functions are present. 
`auto` is the default if functions are present.\n" + description: "Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n\n`none` means the model will not call a function and instead generates a\nmessage.\n\n`auto` means the model can pick between generating a message or calling a\nfunction.\n\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the\nmodel to call that function.\n\n`none` is the default when no functions are present. `auto` is the default\nif functions are present.\n" deprecated: true x-oaiExpandable: true functions: @@ -7025,7 +7100,7 @@ components: x-oaiTypeLabel: string training_file: type: string - description: "The ID of an uploaded file that contains training data.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.\n\nThe contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format.\n\nSee the [fine-tuning guide](/docs/guides/fine-tuning) for more details.\n" + description: "The ID of an uploaded file that contains training data.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour dataset must be formatted as a JSONL file. 
Additionally, you must upload your file with the purpose `fine-tune`.\n\nThe contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input), [completions](/docs/api-reference/fine-tuning/completions-input) format, or if the fine-tuning method uses the [preference](/docs/api-reference/fine-tuning/preference-input) format.\n\nSee the [fine-tuning guide](/docs/guides/fine-tuning) for more details.\n" example: file-abc123 hyperparameters: type: object @@ -7060,7 +7135,8 @@ components: type: integer description: "The number of epochs to train the model for. An epoch refers to one full cycle\nthrough the training dataset.\n" default: auto - description: The hyperparameters used for the fine-tuning job. + description: "The hyperparameters used for the fine-tuning job.\nThis value is now deprecated in favor of `method`, and should be passed in under the `method` parameter.\n" + deprecated: true suffix: maxLength: 64 minLength: 1 @@ -7120,6 +7196,8 @@ components: description: "The seed controls the reproducibility of the job. 
Passing in the same seed and job parameters should produce the same results, but may differ in rare cases.\nIf a seed is not specified, one will be generated for you.\n" nullable: true example: 42 + method: + $ref: '#/components/schemas/FineTuneMethod' CreateImageEditRequest: required: - prompt @@ -8527,6 +8605,197 @@ components: type: integer description: Controls whether the assistant message is trained against (0 or 1) - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' + FineTuneChatRequestInput: + type: object + properties: + messages: + minItems: 1 + type: array + items: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' + - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' + - $ref: '#/components/schemas/FineTuneChatCompletionRequestAssistantMessage' + - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' + - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' + x-oaiExpandable: true + tools: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTool' + description: A list of tools the model may generate JSON inputs for. + parallel_tool_calls: + $ref: '#/components/schemas/ParallelToolCalls' + functions: + maxItems: 128 + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionFunctions' + description: A list of functions the model may generate JSON inputs for. + deprecated: true + description: The per-line training example of a fine-tuning input file for chat models using the supervised method. 
+ x-oaiMeta: + name: Training format for chat models using the supervised method + example: "{\n \"messages\": [\n { \"role\": \"user\", \"content\": \"What is the weather in San Francisco?\" },\n {\n \"role\": \"assistant\",\n \"tool_calls\": [\n {\n \"id\": \"call_id\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\\"location\\\": \\\"San Francisco, USA\\\", \\\"format\\\": \\\"celsius\\\"}\"\n }\n }\n ]\n }\n ],\n \"parallel_tool_calls\": false,\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and country, eg. San Francisco, USA\"\n },\n \"format\": { \"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"] }\n },\n \"required\": [\"location\", \"format\"]\n }\n }\n }\n ]\n}\n" + FineTuneCompletionRequestInput: + type: object + properties: + prompt: + type: string + description: The input prompt for this training example. + completion: + type: string + description: The desired completion for this training example. + description: The per-line training example of a fine-tuning input file for completions models + x-oaiMeta: + name: Training format for completions models + example: "{\n \"prompt\": \"What is the answer to 2+2\",\n \"completion\": \"4\"\n}\n" + FineTuneDPOMethod: + type: object + properties: + hyperparameters: + type: object + properties: + beta: + oneOf: + - enum: + - auto + type: string + - maximum: 2 + minimum: 0 + exclusiveMinimum: true + type: number + description: "The beta value for the DPO method. 
A higher beta value will increase the weight of the penalty between the policy and reference model.\n" + default: auto + batch_size: + oneOf: + - enum: + - auto + type: string + - maximum: 256 + minimum: 1 + type: integer + description: "Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance.\n" + default: auto + learning_rate_multiplier: + oneOf: + - enum: + - auto + type: string + - minimum: 0 + exclusiveMinimum: true + type: number + description: "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting.\n" + default: auto + n_epochs: + oneOf: + - enum: + - auto + type: string + - maximum: 50 + minimum: 1 + type: integer + description: "The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.\n" + default: auto + description: The hyperparameters used for the fine-tuning job. + description: Configuration for the DPO fine-tuning method. + FineTuneMethod: + type: object + properties: + type: + enum: + - supervised + - dpo + type: string + description: The type of method. Is either `supervised` or `dpo`. + supervised: + $ref: '#/components/schemas/FineTuneSupervisedMethod' + dpo: + $ref: '#/components/schemas/FineTuneDPOMethod' + description: The method used for fine-tuning. 
+ FineTunePreferenceRequestInput: + type: object + properties: + input: + type: object + properties: + messages: + minItems: 1 + type: array + items: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' + - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' + - $ref: '#/components/schemas/FineTuneChatCompletionRequestAssistantMessage' + - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' + - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' + x-oaiExpandable: true + tools: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTool' + description: A list of tools the model may generate JSON inputs for. + parallel_tool_calls: + $ref: '#/components/schemas/ParallelToolCalls' + preferred_completion: + maxItems: 1 + type: array + items: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' + x-oaiExpandable: true + description: The preferred completion message for the output. + non_preferred_completion: + maxItems: 1 + type: array + items: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' + x-oaiExpandable: true + description: The non-preferred completion message for the output. + description: The per-line training example of a fine-tuning input file for chat models using the dpo method. 
+ x-oaiMeta: + name: Training format for chat models using the preference method + example: "{\n \"input\": {\n \"messages\": [\n { \"role\": \"user\", \"content\": \"What is the weather in San Francisco?\" }\n ]\n },\n \"preferred_completion\": [\n {\n \"role\": \"assistant\",\n \"content\": \"The weather in San Francisco is 70 degrees Fahrenheit.\"\n }\n ],\n \"non_preferred_completion\": [\n {\n \"role\": \"assistant\",\n \"content\": \"The weather in San Francisco is 21 degrees Celsius.\"\n }\n ]\n}\n" + FineTuneSupervisedMethod: + type: object + properties: + hyperparameters: + type: object + properties: + batch_size: + oneOf: + - enum: + - auto + type: string + - maximum: 256 + minimum: 1 + type: integer + description: "Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance.\n" + default: auto + learning_rate_multiplier: + oneOf: + - enum: + - auto + type: string + - minimum: 0 + exclusiveMinimum: true + type: number + description: "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting.\n" + default: auto + n_epochs: + oneOf: + - enum: + - auto + type: string + - maximum: 50 + minimum: 1 + type: integer + description: "The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.\n" + default: auto + description: The hyperparameters used for the fine-tuning job. + description: Configuration for the supervised fine-tuning method. FineTuningIntegration: title: Fine-Tuning Job Integration required: @@ -8617,10 +8886,28 @@ components: description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. 
nullable: true hyperparameters: - required: - - n_epochs type: object properties: + batch_size: + oneOf: + - enum: + - auto + type: string + - maximum: 256 + minimum: 1 + type: integer + description: "Number of examples in each batch. A larger batch size means that model parameters\nare updated less frequently, but with lower variance.\n" + default: auto + learning_rate_multiplier: + oneOf: + - enum: + - auto + type: string + - minimum: 0 + exclusiveMinimum: true + type: number + description: "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid\noverfitting.\n" + default: auto n_epochs: oneOf: - enum: @@ -8629,9 +8916,9 @@ components: - maximum: 50 minimum: 1 type: integer - description: "The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.\n\"auto\" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs." + description: "The number of epochs to train the model for. An epoch refers to one full cycle\nthrough the training dataset.\n" default: auto - description: 'The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.' + description: The hyperparameters used for the fine-tuning job. This value will only be returned when running `supervised` jobs. model: type: string description: The base model that is being fine-tuned. @@ -8686,10 +8973,12 @@ components: type: integer description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. 
nullable: true + method: + $ref: '#/components/schemas/FineTuneMethod' description: "The `fine_tuning.job` object represents a fine-tuning job that has been created through the API.\n" x-oaiMeta: name: The fine-tuning job object - example: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"davinci-002\",\n \"created_at\": 1692661014,\n \"finished_at\": 1692661190,\n \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n \"organization_id\": \"org-123\",\n \"result_files\": [\n \"file-abc123\"\n ],\n \"status\": \"succeeded\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n },\n \"trained_tokens\": 5768,\n \"integrations\": [],\n \"seed\": 0,\n \"estimated_finish\": 0\n}\n" + example: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"davinci-002\",\n \"created_at\": 1692661014,\n \"finished_at\": 1692661190,\n \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n \"organization_id\": \"org-123\",\n \"result_files\": [\n \"file-abc123\"\n ],\n \"status\": \"succeeded\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n },\n \"trained_tokens\": 5768,\n \"integrations\": [],\n \"seed\": 0,\n \"estimated_finish\": 0,\n \"method\": {\n \"type\": \"supervised\",\n \"supervised\": {\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n }\n }\n }\n}\n" FineTuningJobCheckpoint: title: FineTuningJobCheckpoint required: @@ -8753,72 +9042,40 @@ components: - message type: object properties: + object: + enum: + - fine_tuning.job.event + type: string + description: 'The object type, which is always "fine_tuning.job.event".' id: type: string + description: The object identifier. 
created_at: type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. level: enum: - info - warn - error type: string + description: The log level of the event. message: type: string - object: + description: The message of the event. + type: enum: - - fine_tuning.job.event + - message + - metrics type: string + description: The type of event. + data: + type: object + description: The data associated with the event. description: Fine-tuning job event object x-oaiMeta: name: The fine-tuning job event object - example: "{\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ftevent-abc123\"\n \"created_at\": 1677610602,\n \"level\": \"info\",\n \"message\": \"Created fine-tuning job\"\n}\n" - FinetuneChatRequestInput: - type: object - properties: - messages: - minItems: 1 - type: array - items: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' - - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' - - $ref: '#/components/schemas/FineTuneChatCompletionRequestAssistantMessage' - - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' - - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' - x-oaiExpandable: true - tools: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTool' - description: A list of tools the model may generate JSON inputs for. - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - functions: - maxItems: 128 - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionFunctions' - description: A list of functions the model may generate JSON inputs for. 
- deprecated: true - description: The per-line training example of a fine-tuning input file for chat models - x-oaiMeta: - name: Training format for chat models - example: "{\n \"messages\": [\n { \"role\": \"user\", \"content\": \"What is the weather in San Francisco?\" },\n {\n \"role\": \"assistant\",\n \"tool_calls\": [\n {\n \"id\": \"call_id\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\\"location\\\": \\\"San Francisco, USA\\\", \\\"format\\\": \\\"celsius\\\"}\"\n }\n }\n ]\n }\n ],\n \"parallel_tool_calls\": false,\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and country, eg. San Francisco, USA\"\n },\n \"format\": { \"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"] }\n },\n \"required\": [\"location\", \"format\"]\n }\n }\n }\n ]\n}\n" - FinetuneCompletionRequestInput: - type: object - properties: - prompt: - type: string - description: The input prompt for this training example. - completion: - type: string - description: The desired completion for this training example. - description: The per-line training example of a fine-tuning input file for completions models - x-oaiMeta: - name: Training format for completions models - example: "{\n \"prompt\": \"What is the answer to 2+2\",\n \"completion\": \"4\"\n}\n" + example: "{\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ftevent-abc123\"\n \"created_at\": 1677610602,\n \"level\": \"info\",\n \"message\": \"Created fine-tuning job\",\n \"data\": {},\n \"type\": \"message\"\n}\n" FunctionObject: required: - name @@ -10738,6 +10995,9 @@ components: - response.cancel type: string description: 'The event type, must be `response.cancel`.' 
+ response_id: + type: string + description: "A specific response ID to cancel - if not provided, will cancel an \nin-progress response in the default conversation.\n" description: "Send this event to cancel an in-progress response. The server will respond \nwith a `response.cancelled` event or an error if there is no response to \ncancel.\n" x-oaiMeta: name: response.cancel @@ -10746,7 +11006,6 @@ components: RealtimeClientEventResponseCreate: required: - type - - response type: object properties: event_id: @@ -10758,12 +11017,12 @@ components: type: string description: 'The event type, must be `response.create`.' response: - $ref: '#/components/schemas/RealtimeSession' + $ref: '#/components/schemas/RealtimeResponseCreateParams' description: "This event instructs the server to create a Response, which means triggering \nmodel inference. When in Server VAD mode, the server will create Responses \nautomatically.\n\nA Response will include at least one Item, and may have two, in which case \nthe second will be a function call. These Items will be appended to the \nconversation history.\n\nThe server will respond with a `response.created` event, events for Items \nand content created, and finally a `response.done` event to indicate the \nResponse is complete.\n\nThe `response.create` event includes inference configuration like \n`instructions`, and `temperature`. 
These fields will override the Session's \nconfiguration for this Response only.\n" x-oaiMeta: name: response.create group: realtime - example: "{\n \"event_id\": \"event_234\",\n \"type\": \"response.create\",\n \"response\": {\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"Please assist the user.\",\n \"voice\": \"sage\",\n \"output_audio_format\": \"pcm16\",\n \"tools\": [\n {\n \"type\": \"function\",\n \"name\": \"calculate_sum\",\n \"description\": \"Calculates the sum of two numbers.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"a\": { \"type\": \"number\" },\n \"b\": { \"type\": \"number\" }\n },\n \"required\": [\"a\", \"b\"]\n }\n }\n ],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.7,\n \"max_output_tokens\": 150\n }\n}\n" + example: "{\n \"event_id\": \"event_234\",\n \"type\": \"response.create\",\n \"response\": {\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"Please assist the user.\",\n \"voice\": \"sage\",\n \"output_audio_format\": \"pcm16\",\n \"tools\": [\n {\n \"type\": \"function\",\n \"name\": \"calculate_sum\",\n \"description\": \"Calculates the sum of two numbers.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"a\": { \"type\": \"number\" },\n \"b\": { \"type\": \"number\" }\n },\n \"required\": [\"a\", \"b\"]\n }\n }\n ],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.8,\n \"max_output_tokens\": 1024\n }\n}\n" RealtimeClientEventSessionUpdate: required: - type @@ -10779,12 +11038,12 @@ components: type: string description: 'The event type, must be `session.update`.' session: - $ref: '#/components/schemas/RealtimeSession' + $ref: '#/components/schemas/RealtimeSessionCreateRequest' description: "Send this event to update the session’s default configuration. The client may \nsend this event at any time to update the session configuration, and any \nfield may be updated at any time, except for \"voice\". 
The server will respond \nwith a `session.updated` event that shows the full effective configuration. \nOnly fields that are present are updated, thus the correct way to clear a \nfield like \"instructions\" is to pass an empty string.\n" x-oaiMeta: name: session.update group: realtime - example: "{\n \"event_id\": \"event_123\",\n \"type\": \"session.update\",\n \"session\": {\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"You are a helpful assistant.\",\n \"voice\": \"sage\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": {\n \"type\": \"server_vad\",\n \"threshold\": 0.5,\n \"prefix_padding_ms\": 300,\n \"silence_duration_ms\": 500\n },\n \"tools\": [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather...\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": { \"type\": \"string\" }\n },\n \"required\": [\"location\"]\n }\n }\n ],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.8,\n \"max_response_output_tokens\": \"inf\"\n }\n}\n" + example: "{\n \"event_id\": \"event_123\",\n \"type\": \"session.update\",\n \"session\": {\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"You are a helpful assistant.\",\n \"voice\": \"sage\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": {\n \"type\": \"server_vad\",\n \"threshold\": 0.5,\n \"prefix_padding_ms\": 300,\n \"silence_duration_ms\": 500,\n \"create_response\": true\n },\n \"tools\": [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather...\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": { \"type\": \"string\" }\n },\n \"required\": [\"location\"]\n }\n }\n ],\n \"tool_choice\": \"auto\",\n 
\"temperature\": 0.8,\n \"max_response_output_tokens\": \"inf\"\n }\n}\n" RealtimeConversationItem: type: object properties: @@ -10909,6 +11168,92 @@ components: example: 30 description: Usage statistics for the response. description: The response resource. + RealtimeResponseCreateParams: + type: object + properties: + modalities: + type: array + items: + enum: + - text + - audio + type: string + description: "The set of modalities the model can respond with. To disable audio,\nset this to [\"text\"].\n" + instructions: + type: string + description: "The default system instructions (i.e. system message) prepended to model \ncalls. This field allows the client to guide the model on desired \nresponses. The model can be instructed on response content and format, \n(e.g. \"be extremely succinct\", \"act friendly\", \"here are examples of good \nresponses\") and on audio behavior (e.g. \"talk quickly\", \"inject emotion \ninto your voice\", \"laugh frequently\"). The instructions are not guaranteed \nto be followed by the model, but they provide guidance to the model on the \ndesired behavior.\n\nNote that the server sets default instructions which will be used if this \nfield is not set and are visible in the `session.created` event at the \nstart of the session.\n" + voice: + enum: + - alloy + - ash + - ballad + - coral + - echo + - sage + - shimmer + - verse + type: string + description: "The voice the model uses to respond. Voice cannot be changed during the \nsession once the model has responded with audio at least once. Current \nvoice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, \n`shimmer` and `verse`.\n" + output_audio_format: + enum: + - pcm16 + - g711_ulaw + - g711_alaw + type: string + description: "The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n" + tools: + type: array + items: + type: object + properties: + type: + enum: + - function + type: string + description: 'The type of the tool, i.e. `function`.' 
+ name: + type: string + description: The name of the function. + description: + type: string + description: "The description of the function, including guidance on when and how \nto call it, and guidance about what to tell the user when calling \n(if anything).\n" + parameters: + type: object + description: Parameters of the function in JSON Schema. + description: Tools (functions) available to the model. + tool_choice: + type: string + description: "How the model chooses tools. Options are `auto`, `none`, `required`, or \nspecify a function, like `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}`.\n" + temperature: + type: number + description: "Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.\n" + max_response_output_tokens: + oneOf: + - type: integer + - enum: + - inf + type: string + description: "Maximum number of output tokens for a single assistant response,\ninclusive of tool calls. Provide an integer between 1 and 4096 to\nlimit output tokens, or `inf` for the maximum available tokens for a\ngiven model. Defaults to `inf`.\n" + conversation: + oneOf: + - type: string + - enum: + - auto + - none + type: string + default: auto + description: "Controls which conversation the response is added to. Currently supports\n`auto` and `none`, with `auto` as the default value. The `auto` value\nmeans that the contents of the response will be added to the default\nconversation. Set this to `none` to create an out-of-band response which \nwill not add items to default conversation.\n" + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat. 
Keys can be a maximum of 64 characters long and values can be a\nmaximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + input: + type: array + items: + $ref: '#/components/schemas/RealtimeConversationItem' + description: "Input items to include in the prompt for the model. Creates a new context\nfor this response, without including the default conversation. Can include\nreferences to items from the default conversation.\n" + description: Create a new Realtime response with these parameters RealtimeServerEventConversationCreated: required: - event_id @@ -11110,6 +11455,9 @@ components: type: string description: 'The event type, must be `error`.' error: + required: + - type + - message type: object properties: type: @@ -11118,15 +11466,18 @@ components: code: type: string description: 'Error code, if any.' + nullable: true message: type: string description: A human-readable error message. param: type: string description: 'Parameter related to the error, if any.' + nullable: true event_id: type: string description: "The event_id of the client event that caused the error, if applicable.\n" + nullable: true description: Details of the error. description: "Returned when an error occurs, which could be a client problem or a server \nproblem. Most errors are recoverable and the session will stay open, we \nrecommend to implementors to monitor and log error messages by default.\n" x-oaiMeta: @@ -11254,8 +11605,11 @@ components: type: object properties: name: + enum: + - requests + - tokens type: string - description: 'The name of the rate limit (`requests`, `tokens`).' + description: "The name of the rate limit (`requests`, `tokens`).\n" limit: type: integer description: The maximum allowed value for the rate limit. @@ -11513,6 +11867,9 @@ components: type: object properties: type: + enum: + - audio + - text type: string description: 'The content type ("text", "audio").' 
text: @@ -11811,7 +12168,7 @@ components: x-oaiMeta: name: session.created group: realtime - example: "{\n \"event_id\": \"event_1234\",\n \"type\": \"session.created\",\n \"session\": {\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-4o-realtime-preview-2024-10-01\",\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"...model instructions here...\",\n \"voice\": \"sage\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": null,\n \"turn_detection\": {\n \"type\": \"server_vad\",\n \"threshold\": 0.5,\n \"prefix_padding_ms\": 300,\n \"silence_duration_ms\": 200\n },\n \"tools\": [],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.8,\n \"max_response_output_tokens\": \"inf\"\n }\n}\n" + example: "{\n \"event_id\": \"event_1234\",\n \"type\": \"session.created\",\n \"session\": {\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-4o-realtime-preview-2024-12-17\",\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"...model instructions here...\",\n \"voice\": \"sage\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": null,\n \"turn_detection\": {\n \"type\": \"server_vad\",\n \"threshold\": 0.5,\n \"prefix_padding_ms\": 300,\n \"silence_duration_ms\": 200\n },\n \"tools\": [],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.8,\n \"max_response_output_tokens\": \"inf\"\n }\n}\n" RealtimeServerEventSessionUpdated: required: - event_id @@ -11833,7 +12190,7 @@ components: x-oaiMeta: name: session.updated group: realtime - example: "{\n \"event_id\": \"event_5678\",\n \"type\": \"session.updated\",\n \"session\": {\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-4o-realtime-preview-2024-10-01\",\n \"modalities\": [\"text\"],\n \"instructions\": \"New instructions\",\n \"voice\": \"sage\",\n \"input_audio_format\": \"pcm16\",\n 
\"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": null,\n \"tools\": [],\n \"tool_choice\": \"none\",\n \"temperature\": 0.7,\n \"max_response_output_tokens\": 200\n }\n}\n" + example: "{\n \"event_id\": \"event_5678\",\n \"type\": \"session.updated\",\n \"session\": {\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-4o-realtime-preview-2024-12-17\",\n \"modalities\": [\"text\"],\n \"instructions\": \"New instructions\",\n \"voice\": \"sage\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": null,\n \"tools\": [],\n \"tool_choice\": \"none\",\n \"temperature\": 0.7,\n \"max_response_output_tokens\": 200\n }\n}\n" RealtimeSession: type: object properties: @@ -11952,6 +12309,219 @@ components: description: 'Maximum number of output tokens for a single assistant response, inclusive of tool calls. Provide an integer between 1 and 4096 to limit output tokens, or "inf" for the maximum available tokens for a given model. Defaults to "inf".' 
default: inf description: "A session refers to a single WebSocket connection between a client and the server.\n\nOnce a client creates a session, it then sends JSON-formatted events containing text and audio chunks.\nThe server will respond in kind with audio containing voice output, a text transcript of that voice output,\nand function calls (if functions are provided by the client).\n\nA realtime Session represents the overall client-server interaction, and contains default configuration.\n\nIt has a set of default values which can be updated at any time (via session.update) or on a per-response level (via response.create).\n" + RealtimeSessionCreateRequest: + required: + - model + type: object + properties: + modalities: + items: + enum: + - text + - audio + type: string + description: "The set of modalities the model can respond with. To disable audio,\nset this to [\"text\"].\n" + model: + enum: + - gpt-4o-realtime-preview + - gpt-4o-realtime-preview-2024-10-01 + - gpt-4o-realtime-preview-2024-12-17 + - gpt-4o-mini-realtime-preview + - gpt-4o-mini-realtime-preview-2024-12-17 + type: string + description: "The Realtime model used for this session.\n" + instructions: + type: string + description: "The default system instructions (i.e. system message) prepended to model \ncalls. This field allows the client to guide the model on desired \nresponses. The model can be instructed on response content and format, \n(e.g. \"be extremely succinct\", \"act friendly\", \"here are examples of good \nresponses\") and on audio behavior (e.g. \"talk quickly\", \"inject emotion \ninto your voice\", \"laugh frequently\"). 
The instructions are not guaranteed \nto be followed by the model, but they provide guidance to the model on the \ndesired behavior.\n\nNote that the server sets default instructions which will be used if this \nfield is not set and are visible in the `session.created` event at the \nstart of the session.\n" + voice: + enum: + - alloy + - ash + - ballad + - coral + - echo + - sage + - shimmer + - verse + type: string + description: "The voice the model uses to respond. Voice cannot be changed during the \nsession once the model has responded with audio at least once. Current \nvoice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, \n`shimmer` and `verse`.\n" + input_audio_format: + enum: + - pcm16 + - g711_ulaw + - g711_alaw + type: string + description: "The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n" + output_audio_format: + enum: + - pcm16 + - g711_ulaw + - g711_alaw + type: string + description: "The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n" + input_audio_transcription: + type: object + properties: + model: + type: string + description: "The model to use for transcription, `whisper-1` is the only currently \nsupported model.\n" + description: "Configuration for input audio transcription, defaults to off and can be \nset to `null` to turn off once on. Input audio transcription is not native \nto the model, since the model consumes audio directly. Transcription runs \nasynchronously through Whisper and should be treated as rough guidance \nrather than the representation understood by the model.\n" + turn_detection: + type: object + properties: + type: + type: string + description: "Type of turn detection, only `server_vad` is currently supported.\n" + threshold: + type: number + description: "Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. 
A \nhigher threshold will require louder audio to activate the model, and \nthus might perform better in noisy environments.\n" + prefix_padding_ms: + type: integer + description: "Amount of audio to include before the VAD detected speech (in \nmilliseconds). Defaults to 300ms.\n" + silence_duration_ms: + type: integer + description: "Duration of silence to detect speech stop (in milliseconds). Defaults \nto 500ms. With shorter values the model will respond more quickly, \nbut may jump in on short pauses from the user.\n" + create_response: + type: boolean + description: "Whether or not to automatically generate a response when VAD is\nenabled. `true` by default.\n" + default: true + description: "Configuration for turn detection. Can be set to `null` to turn off. Server \nVAD means that the model will detect the start and end of speech based on \naudio volume and respond at the end of user speech.\n" + tools: + type: array + items: + type: object + properties: + type: + enum: + - function + type: string + description: 'The type of the tool, i.e. `function`.' + name: + type: string + description: The name of the function. + description: + type: string + description: "The description of the function, including guidance on when and how \nto call it, and guidance about what to tell the user when calling \n(if anything).\n" + parameters: + type: object + description: Parameters of the function in JSON Schema. + description: Tools (functions) available to the model. + tool_choice: + type: string + description: "How the model chooses tools. Options are `auto`, `none`, `required`, or \nspecify a function.\n" + temperature: + type: number + description: "Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.\n" + max_response_output_tokens: + oneOf: + - type: integer + - enum: + - inf + type: string + description: "Maximum number of output tokens for a single assistant response,\ninclusive of tool calls. 
Provide an integer between 1 and 4096 to\nlimit output tokens, or `inf` for the maximum available tokens for a\ngiven model. Defaults to `inf`.\n" + description: Realtime session object configuration. + RealtimeSessionCreateResponse: + type: object + properties: + client_secret: + type: object + properties: + value: + type: string + description: "Ephemeral key usable in client environments to authenticate connections\nto the Realtime API. Use this in client-side environments rather than\na standard API token, which should only be used server-side.\n" + expires_at: + type: integer + description: "Timestamp for when the token expires. Currently, all tokens expire\nafter one minute.\n" + description: Ephemeral key returned by the API. + modalities: + items: + enum: + - text + - audio + type: string + description: "The set of modalities the model can respond with. To disable audio,\nset this to [\"text\"].\n" + instructions: + type: string + description: "The default system instructions (i.e. system message) prepended to model \ncalls. This field allows the client to guide the model on desired \nresponses. The model can be instructed on response content and format, \n(e.g. \"be extremely succinct\", \"act friendly\", \"here are examples of good \nresponses\") and on audio behavior (e.g. \"talk quickly\", \"inject emotion \ninto your voice\", \"laugh frequently\"). The instructions are not guaranteed \nto be followed by the model, but they provide guidance to the model on the \ndesired behavior.\n\nNote that the server sets default instructions which will be used if this \nfield is not set and are visible in the `session.created` event at the \nstart of the session.\n" + voice: + enum: + - alloy + - ash + - ballad + - coral + - echo + - sage + - shimmer + - verse + type: string + description: "The voice the model uses to respond. Voice cannot be changed during the \nsession once the model has responded with audio at least once. 
Current \nvoice options are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, \n`shimmer` and `verse`.\n" + input_audio_format: + type: string + description: "The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n" + output_audio_format: + type: string + description: "The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n" + input_audio_transcription: + type: object + properties: + model: + type: string + description: "The model to use for transcription, `whisper-1` is the only currently \nsupported model.\n" + description: "Configuration for input audio transcription, defaults to off and can be \nset to `null` to turn off once on. Input audio transcription is not native \nto the model, since the model consumes audio directly. Transcription runs \nasynchronously through Whisper and should be treated as rough guidance \nrather than the representation understood by the model.\n" + turn_detection: + type: object + properties: + type: + type: string + description: "Type of turn detection, only `server_vad` is currently supported.\n" + threshold: + type: number + description: "Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A \nhigher threshold will require louder audio to activate the model, and \nthus might perform better in noisy environments.\n" + prefix_padding_ms: + type: integer + description: "Amount of audio to include before the VAD detected speech (in \nmilliseconds). Defaults to 300ms.\n" + silence_duration_ms: + type: integer + description: "Duration of silence to detect speech stop (in milliseconds). Defaults \nto 500ms. With shorter values the model will respond more quickly, \nbut may jump in on short pauses from the user.\n" + description: "Configuration for turn detection. Can be set to `null` to turn off. 
Server \nVAD means that the model will detect the start and end of speech based on \naudio volume and respond at the end of user speech.\n" + tools: + type: array + items: + type: object + properties: + type: + enum: + - function + type: string + description: 'The type of the tool, i.e. `function`.' + name: + type: string + description: The name of the function. + description: + type: string + description: "The description of the function, including guidance on when and how \nto call it, and guidance about what to tell the user when calling \n(if anything).\n" + parameters: + type: object + description: Parameters of the function in JSON Schema. + description: Tools (functions) available to the model. + tool_choice: + type: string + description: "How the model chooses tools. Options are `auto`, `none`, `required`, or \nspecify a function.\n" + temperature: + type: number + description: "Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.\n" + max_response_output_tokens: + oneOf: + - type: integer + - enum: + - inf + type: string + description: "Maximum number of output tokens for a single assistant response,\ninclusive of tool calls. Provide an integer between 1 and 4096 to\nlimit output tokens, or `inf` for the maximum available tokens for a\ngiven model. Defaults to `inf`.\n" + description: "A new Realtime session configuration, with an ephemeral key. 
Default TTL\nfor keys is one minute.\n" + x-oaiMeta: + name: The session object + group: realtime + example: "{\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-4o-realtime-preview-2024-12-17\",\n \"modalities\": [\"audio\", \"text\"],\n \"instructions\": \"You are a friendly assistant.\",\n \"voice\": \"alloy\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": null,\n \"tools\": [],\n \"tool_choice\": \"none\",\n \"temperature\": 0.7,\n \"max_response_output_tokens\": 200,\n \"client_secret\": {\n \"value\": \"ek_abc123\", \n \"expires_at\": 1234567890\n }\n}\n" ResponseFormatJsonObject: required: - type @@ -15488,10 +16058,13 @@ x-oaiMeta: key: cancelFineTuningJob path: cancel - type: object - key: FinetuneChatRequestInput + key: FineTuneChatRequestInput path: chat-input - type: object - key: FinetuneCompletionRequestInput + key: FineTunePreferenceRequestInput + path: preference-input + - type: object + key: FineTuneCompletionRequestInput path: completions-input - type: object key: FineTuningJob @@ -16028,8 +16601,19 @@ x-oaiMeta: - id: realtime title: Realtime beta: true - description: "Communicate with a GPT-4o class model live, in real time, over WebSocket.\nProduces both audio and text transcriptions.\n[Learn more about the Realtime API](/docs/guides/realtime).\n" + description: "Communicate with a GPT-4o class model in real time using WebRTC or \nWebSockets. 
Supports text and audio inputs and outputs, along with audio\ntranscriptions.\n[Learn more about the Realtime API](/docs/guides/realtime).\n" + navigationGroup: realtime + - id: realtime-sessions + title: Session tokens + description: "REST API endpoint to generate ephemeral session tokens for use in client-side\napplications.\n" + navigationGroup: realtime + sections: + - type: endpoint + key: create-realtime-session + path: create + - type: object + key: RealtimeSessionCreateResponse + path: session_object + - id: realtime-client-events + title: Client events + description: "These are events that the OpenAI Realtime WebSocket server will accept from the client.\n"