diff --git a/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionModalitie.g.cs b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionModalitie.g.cs new file mode 100644 index 00000000..9627b87e --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionModalitie.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class ChatCompletionModalitieJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.ChatCompletionModalitie Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.ChatCompletionModalitieExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.ChatCompletionModalitie)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.ChatCompletionModalitie value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.ChatCompletionModalitieExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionModalitieNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionModalitieNullable.g.cs new file mode 100644 index 00000000..1262be53 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionModalitieNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class ChatCompletionModalitieNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.ChatCompletionModalitie? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.ChatCompletionModalitieExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.ChatCompletionModalitie)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.ChatCompletionModalitie? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.ChatCompletionModalitieExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.cs b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.cs new file mode 100644 index 00000000..376cbbbd --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class ChatCompletionRequestMessageContentPartAudioInputAudioFormatJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudioFormat Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudioFormatExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudioFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudioFormat value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudioFormatExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormatNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormatNullable.g.cs new file mode 100644 index 00000000..6296634e --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormatNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class ChatCompletionRequestMessageContentPartAudioInputAudioFormatNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudioFormat? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudioFormatExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudioFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudioFormat? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudioFormatExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessageContentPartAudioType.g.cs b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessageContentPartAudioType.g.cs new file mode 100644 index 00000000..a655be9d --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessageContentPartAudioType.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class ChatCompletionRequestMessageContentPartAudioTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.ChatCompletionRequestMessageContentPartAudioType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.ChatCompletionRequestMessageContentPartAudioTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.ChatCompletionRequestMessageContentPartAudioType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.ChatCompletionRequestMessageContentPartAudioType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.ChatCompletionRequestMessageContentPartAudioTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessageContentPartAudioTypeNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessageContentPartAudioTypeNullable.g.cs new file mode 100644 index 00000000..1e4fe8e4 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestMessageContentPartAudioTypeNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class ChatCompletionRequestMessageContentPartAudioTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.ChatCompletionRequestMessageContentPartAudioType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.ChatCompletionRequestMessageContentPartAudioTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.ChatCompletionRequestMessageContentPartAudioType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.ChatCompletionRequestMessageContentPartAudioType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.ChatCompletionRequestMessageContentPartAudioTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestUserMessageContentPart.g.cs b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestUserMessageContentPart.g.cs index 1b9350a3..91cff9b0 100644 --- a/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestUserMessageContentPart.g.cs +++ b/src/libs/OpenAI/Generated/JsonConverters.ChatCompletionRequestUserMessageContentPart.g.cs @@ -40,9 +40,22 @@ public class ChatCompletionRequestUserMessageContentPartJsonConverter : global:: { } + readerCopy = reader; + global::OpenAI.ChatCompletionRequestMessageContentPartAudio? audio = default; + try + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::OpenAI.ChatCompletionRequestMessageContentPartAudio), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
+ throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::OpenAI.ChatCompletionRequestMessageContentPartAudio).Name}"); + audio = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, typeInfo); + } + catch (global::System.Text.Json.JsonException) + { + } + var result = new global::OpenAI.ChatCompletionRequestUserMessageContentPart( text, - image + image, + audio ); if (text != null) @@ -57,6 +70,12 @@ public class ChatCompletionRequestUserMessageContentPartJsonConverter : global:: throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::OpenAI.ChatCompletionRequestMessageContentPartImage).Name}"); _ = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } + else if (audio != null) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::OpenAI.ChatCompletionRequestMessageContentPartAudio), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::OpenAI.ChatCompletionRequestMessageContentPartAudio).Name}"); + _ = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } return result; } @@ -82,6 +101,12 @@ public override void Write( throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::OpenAI.ChatCompletionRequestMessageContentPartImage).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.Image, typeInfo); } + else if (value.IsAudio) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::OpenAI.ChatCompletionRequestMessageContentPartAudio), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
+ throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::OpenAI.ChatCompletionRequestMessageContentPartAudio).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.Audio, typeInfo); + } } } } \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestAudioFormat.g.cs b/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestAudioFormat.g.cs new file mode 100644 index 00000000..929ba66f --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestAudioFormat.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class CreateChatCompletionRequestAudioFormatJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.CreateChatCompletionRequestAudioFormat Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.CreateChatCompletionRequestAudioFormatExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.CreateChatCompletionRequestAudioFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.CreateChatCompletionRequestAudioFormat value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.CreateChatCompletionRequestAudioFormatExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestAudioFormatNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestAudioFormatNullable.g.cs new file mode 100644 index 00000000..25be7588 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestAudioFormatNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class CreateChatCompletionRequestAudioFormatNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.CreateChatCompletionRequestAudioFormat? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.CreateChatCompletionRequestAudioFormatExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.CreateChatCompletionRequestAudioFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.CreateChatCompletionRequestAudioFormat? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.CreateChatCompletionRequestAudioFormatExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestAudioVoice.g.cs b/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestAudioVoice.g.cs new file mode 100644 index 00000000..27495102 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestAudioVoice.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class CreateChatCompletionRequestAudioVoiceJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.CreateChatCompletionRequestAudioVoice Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.CreateChatCompletionRequestAudioVoiceExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.CreateChatCompletionRequestAudioVoice)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.CreateChatCompletionRequestAudioVoice value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.CreateChatCompletionRequestAudioVoiceExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestAudioVoiceNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestAudioVoiceNullable.g.cs new file mode 100644 index 00000000..e4c5ed23 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.CreateChatCompletionRequestAudioVoiceNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class CreateChatCompletionRequestAudioVoiceNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.CreateChatCompletionRequestAudioVoice? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.CreateChatCompletionRequestAudioVoiceExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.CreateChatCompletionRequestAudioVoice)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.CreateChatCompletionRequestAudioVoice? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.CreateChatCompletionRequestAudioVoiceExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokens.g.cs new file mode 100644 index 00000000..a494c1aa --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokens.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeClientEventResponseCreateResponseMaxOutputTokensJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokens Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokensExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokens value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokensExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokensNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokensNullable.g.cs new file mode 100644 index 00000000..8da6e017 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokensNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeClientEventResponseCreateResponseMaxOutputTokensNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokens? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokensExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokens? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokensExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokens.g.cs new file mode 100644 index 00000000..fcb4f135 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokens.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeClientEventSessionUpdateSessionMaxOutputTokensJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokens Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokensExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokens value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokensExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokensNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokensNullable.g.cs new file mode 100644 index 00000000..ae8792b9 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokensNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeClientEventSessionUpdateSessionMaxOutputTokensNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokens? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokensExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokens? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokensExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokens.g.cs new file mode 100644 index 00000000..eec91bb3 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokens.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeServerEventSessionCreatedSessionMaxOutputTokensJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokens Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokensExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokens value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokensExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokensNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokensNullable.g.cs new file mode 100644 index 00000000..368b1f63 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokensNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeServerEventSessionCreatedSessionMaxOutputTokensNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokens? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokensExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokens? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokensExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.g.cs new file mode 100644 index 00000000..3a52d971 --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.g.cs @@ -0,0 +1,49 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeServerEventSessionUpdatedSessionMaxOutputTokensJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensExtensions.ToValueString(value)); + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensNullable.g.cs b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensNullable.g.cs new file mode 100644 index 00000000..d8caf3cc --- /dev/null +++ b/src/libs/OpenAI/Generated/JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensNullable.g.cs @@ -0,0 +1,56 @@ +#nullable enable + +namespace OpenAI.JsonConverters +{ + /// + public sealed class RealtimeServerEventSessionUpdatedSessionMaxOutputTokensNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/libs/OpenAI/Generated/JsonSerializerContext.g.cs b/src/libs/OpenAI/Generated/JsonSerializerContext.g.cs index 4033355b..c9cbf63e 100644 --- a/src/libs/OpenAI/Generated/JsonSerializerContext.g.cs +++ b/src/libs/OpenAI/Generated/JsonSerializerContext.g.cs @@ -13,58 +13,258 @@ namespace OpenAI DefaultIgnoreCondition = global::System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull, Converters = new global::System.Type[] { - typeof(global::OpenAI.JsonConverters.ListModelsResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ListModelsResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ModelObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ModelObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateCompletionRequestModelJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateCompletionRequestModelNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateCompletionResponseChoiceFinishReasonJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateCompletionResponseChoiceFinishReasonNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateCompletionResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateCompletionResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantObjectObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantObjectObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantToolsCodeTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantToolsCodeTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantToolsFileSearchTypeJsonConverter), 
+ typeof(global::OpenAI.JsonConverters.AssistantToolsFileSearchTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FileSearchRankingOptionsRankerJsonConverter), + typeof(global::OpenAI.JsonConverters.FileSearchRankingOptionsRankerNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantToolsFunctionTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantToolsFunctionTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantsApiResponseFormatOptionEnumJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantsApiResponseFormatOptionEnumNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ResponseFormatTextTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.ResponseFormatTextTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ResponseFormatJsonObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.ResponseFormatJsonObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ResponseFormatJsonSchemaTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.ResponseFormatJsonSchemaTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ThreadStreamEventVariant1EventJsonConverter), + typeof(global::OpenAI.JsonConverters.ThreadStreamEventVariant1EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ThreadObjectObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ThreadObjectObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant1EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant1EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunObjectObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.RunObjectObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunObjectStatusJsonConverter), + typeof(global::OpenAI.JsonConverters.RunObjectStatusNullableJsonConverter), + 
typeof(global::OpenAI.JsonConverters.RunObjectRequiredActionTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunObjectRequiredActionTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunToolCallObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunToolCallObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunObjectLastErrorCodeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunObjectLastErrorCodeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunObjectIncompleteDetailsReasonJsonConverter), + typeof(global::OpenAI.JsonConverters.RunObjectIncompleteDetailsReasonNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.TruncationObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.TruncationObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantsApiToolChoiceOptionEnumJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantsApiToolChoiceOptionEnumNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantsNamedToolChoiceTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantsNamedToolChoiceTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant2EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant2EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant3EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant3EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant4EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant4EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant5EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant5EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant6EventJsonConverter), + 
typeof(global::OpenAI.JsonConverters.RunStreamEventVariant6EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant7EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant7EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant8EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant8EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant9EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant9EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant10EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventVariant10EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant1EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant1EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepObjectObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepObjectObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepObjectStatusJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepObjectStatusNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsMessageCreationObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsMessageCreationObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsCodeObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsCodeObjectTypeNullableJsonConverter), + 
typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsCodeOutputLogsObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsCodeOutputLogsObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsCodeOutputImageObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsCodeOutputImageObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFileSearchObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFileSearchObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFunctionObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFunctionObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepObjectLastErrorCodeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepObjectLastErrorCodeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant2EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant2EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant3EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant3EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaObjectObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaObjectObjectNullableJsonConverter), + 
typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsMessageCreationObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsMessageCreationObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsFileSearchObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsFileSearchObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsFunctionObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsFunctionObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant4EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant4EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant5EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant5EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant6EventJsonConverter), + 
typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant6EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant7EventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant7EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant1EventJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant1EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageObjectObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageObjectObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageObjectStatusJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageObjectStatusNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageObjectIncompleteDetailsReasonJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageObjectIncompleteDetailsReasonNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageObjectRoleJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageObjectRoleNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentImageFileObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentImageFileObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentImageFileObjectImageFileDetailJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentImageFileObjectImageFileDetailNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentImageUrlObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentImageUrlObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentImageUrlObjectImageUrlDetailJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentImageUrlObjectImageUrlDetailNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentTextObjectTypeJsonConverter), + 
typeof(global::OpenAI.JsonConverters.MessageContentTextObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentTextAnnotationsFileCitationObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentTextAnnotationsFileCitationObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentTextAnnotationsFilePathObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentTextAnnotationsFilePathObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentRefusalObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageContentRefusalObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantToolsFileSearchTypeOnlyTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantToolsFileSearchTypeOnlyTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant2EventJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant2EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant3EventJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant3EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaObjectObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaObjectObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaObjectDeltaRoleJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaObjectDeltaRoleNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageFileObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageFileObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageFileObjectImageFileDetailJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageFileObjectImageFileDetailNullableJsonConverter), 
+ typeof(global::OpenAI.JsonConverters.MessageDeltaContentTextObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentTextObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentTextAnnotationsFileCitationObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentTextAnnotationsFileCitationObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentTextAnnotationsFilePathObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentTextAnnotationsFilePathObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentRefusalObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentRefusalObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageUrlObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageUrlObjectTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageUrlObjectImageUrlDetailJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageUrlObjectImageUrlDetailNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant4EventJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant4EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant5EventJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant5EventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ErrorEventEventJsonConverter), + typeof(global::OpenAI.JsonConverters.ErrorEventEventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.DoneEventEventJsonConverter), + typeof(global::OpenAI.JsonConverters.DoneEventEventNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.DoneEventDataJsonConverter), + 
typeof(global::OpenAI.JsonConverters.DoneEventDataNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.AudioResponseFormatJsonConverter), + typeof(global::OpenAI.JsonConverters.AudioResponseFormatNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.AuditLogEventTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.AuditLogEventTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.AuditLogActorTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.AuditLogActorTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.AuditLogActorApiKeyTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.AuditLogActorApiKeyTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.AutoChunkingStrategyRequestParamTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.AutoChunkingStrategyRequestParamTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.BatchObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.BatchObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.BatchStatusJsonConverter), + typeof(global::OpenAI.JsonConverters.BatchStatusNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.BatchRequestInputMethodJsonConverter), + typeof(global::OpenAI.JsonConverters.BatchRequestInputMethodNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionMessageToolCallTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionMessageToolCallTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionMessageToolCallChunkTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionMessageToolCallChunkTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionModalitieJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionModalitieNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionNamedToolChoiceTypeJsonConverter), + 
typeof(global::OpenAI.JsonConverters.ChatCompletionNamedToolChoiceTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartTextTypeJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartTextTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartImageTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartImageTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartImageImageUrlDetailJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartImageImageUrlDetailNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartRefusalTypeJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartRefusalTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRequestAssistantMessageRoleJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRequestAssistantMessageRoleNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRequestFunctionMessageRoleJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRequestFunctionMessageRoleNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestSystemMessageRoleJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestSystemMessageRoleNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartImageTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartImageTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartImageImageUrlDetailJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartImageImageUrlDetailNullableJsonConverter), 
+ typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartAudioTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartAudioTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormatJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormatNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestUserMessageRoleJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestUserMessageRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionRequestAssistantMessageRoleJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionRequestAssistantMessageRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionMessageToolCallTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionMessageToolCallTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestToolMessageRoleJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestToolMessageRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionRequestFunctionMessageRoleJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionRequestFunctionMessageRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionToolTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionToolTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ResponseFormatTextTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.ResponseFormatTextTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ResponseFormatJsonObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.ResponseFormatJsonObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ResponseFormatJsonSchemaTypeJsonConverter), - 
typeof(global::OpenAI.JsonConverters.ResponseFormatJsonSchemaTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionToolChoiceOptionEnumJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionToolChoiceOptionEnumNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionNamedToolChoiceTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionNamedToolChoiceTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionMessageToolCallChunkTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionMessageToolCallChunkTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionRoleJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionRoleNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionResponseMessageRoleJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionResponseMessageRoleNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRoleJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionRoleNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionStreamResponseDeltaRoleJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionStreamResponseDeltaRoleNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionToolTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionToolTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionToolChoiceOptionEnumJsonConverter), + typeof(global::OpenAI.JsonConverters.ChatCompletionToolChoiceOptionEnumNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.StaticChunkingStrategyRequestParamTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.StaticChunkingStrategyRequestParamTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateAssistantRequestModelJsonConverter), + 
typeof(global::OpenAI.JsonConverters.CreateAssistantRequestModelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1TypeJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1TypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2TypeJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2TypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateChatCompletionFunctionResponseChoiceFinishReasonJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateChatCompletionFunctionResponseChoiceFinishReasonNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateChatCompletionFunctionResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateChatCompletionFunctionResponseObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestModelJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestModelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestAudioVoiceJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestAudioVoiceNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestAudioFormatJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestAudioFormatNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestServiceTierJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestServiceTierNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestFunctionCallJsonConverter), @@ -75,26 +275,44 @@ namespace OpenAI 
typeof(global::OpenAI.JsonConverters.CreateChatCompletionResponseServiceTierNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionResponseObjectJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateChatCompletionFunctionResponseChoiceFinishReasonJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateChatCompletionFunctionResponseChoiceFinishReasonNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateChatCompletionFunctionResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateChatCompletionFunctionResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningJobHyperparametersNEpochsJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningJobHyperparametersNEpochsNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningJobObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningJobObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningJobStatusJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningJobStatusNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningIntegrationTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningIntegrationTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ListPaginatedFineTuningJobsResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ListPaginatedFineTuningJobsResponseObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionStreamResponseChoiceFinishReasonJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionStreamResponseChoiceFinishReasonNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionStreamResponseServiceTierJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionStreamResponseServiceTierNullableJsonConverter), 
typeof(global::OpenAI.JsonConverters.CreateChatCompletionStreamResponseObjectJsonConverter), typeof(global::OpenAI.JsonConverters.CreateChatCompletionStreamResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateCompletionRequestModelJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateCompletionRequestModelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateCompletionResponseChoiceFinishReasonJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateCompletionResponseChoiceFinishReasonNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateCompletionResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateCompletionResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateEmbeddingRequestModelJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateEmbeddingRequestModelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateEmbeddingRequestEncodingFormatJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateEmbeddingRequestEncodingFormatNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.EmbeddingObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.EmbeddingObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateEmbeddingResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateEmbeddingResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateFileRequestPurposeJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateFileRequestPurposeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestModelJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestModelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestHyperparametersBatchSizeJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestHyperparametersBatchSizeNullableJsonConverter), + 
typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestHyperparametersLearningRateMultiplierJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestHyperparametersLearningRateMultiplierNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestHyperparametersNEpochsJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestHyperparametersNEpochsNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestIntegrationTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestIntegrationTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateImageEditRequestModelJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateImageEditRequestModelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateImageEditRequestSizeJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateImageEditRequestSizeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateImageEditRequestResponseFormatJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateImageEditRequestResponseFormatNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateImageRequestModelJsonConverter), typeof(global::OpenAI.JsonConverters.CreateImageRequestModelNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateImageRequestQualityJsonConverter), @@ -105,18 +323,16 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.CreateImageRequestSizeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateImageRequestStyleJsonConverter), typeof(global::OpenAI.JsonConverters.CreateImageRequestStyleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateImageEditRequestModelJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateImageEditRequestModelNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateImageEditRequestSizeJsonConverter), - 
typeof(global::OpenAI.JsonConverters.CreateImageEditRequestSizeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateImageEditRequestResponseFormatJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateImageEditRequestResponseFormatNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateImageVariationRequestModelJsonConverter), typeof(global::OpenAI.JsonConverters.CreateImageVariationRequestModelNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateImageVariationRequestResponseFormatJsonConverter), typeof(global::OpenAI.JsonConverters.CreateImageVariationRequestResponseFormatNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateImageVariationRequestSizeJsonConverter), typeof(global::OpenAI.JsonConverters.CreateImageVariationRequestSizeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateMessageRequestRoleJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateMessageRequestRoleNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageRequestContentTextObjectTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.MessageRequestContentTextObjectTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateModerationRequestInputVariant3ItemVariant1TypeJsonConverter), typeof(global::OpenAI.JsonConverters.CreateModerationRequestInputVariant3ItemVariant1TypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateModerationRequestInputVariant3ItemVariant2TypeJsonConverter), @@ -149,368 +365,170 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemJsonConverter), typeof(global::OpenAI.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemNullableJsonConverter), - 
typeof(global::OpenAI.JsonConverters.OpenAIFileObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.OpenAIFileObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.OpenAIFilePurposeJsonConverter), - typeof(global::OpenAI.JsonConverters.OpenAIFilePurposeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.OpenAIFileStatusJsonConverter), - typeof(global::OpenAI.JsonConverters.OpenAIFileStatusNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ListFilesResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ListFilesResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateFileRequestPurposeJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateFileRequestPurposeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.DeleteFileResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.DeleteFileResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateUploadRequestPurposeJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateUploadRequestPurposeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestModelJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestModelNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestHyperparametersBatchSizeJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestHyperparametersBatchSizeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestHyperparametersLearningRateMultiplierJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestHyperparametersLearningRateMultiplierNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestHyperparametersNEpochsJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestHyperparametersNEpochsNullableJsonConverter), - 
typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestIntegrationTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateFineTuningJobRequestIntegrationTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningJobEventLevelJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningJobEventLevelNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningJobEventObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningJobEventObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ListFineTuningJobEventsResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ListFineTuningJobEventsResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningJobCheckpointObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuningJobCheckpointObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ListFineTuningJobCheckpointsResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ListFineTuningJobCheckpointsResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateEmbeddingRequestModelJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateEmbeddingRequestModelNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateEmbeddingRequestEncodingFormatJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateEmbeddingRequestEncodingFormatNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.EmbeddingObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.EmbeddingObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateEmbeddingResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateEmbeddingResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateTranscriptionRequestModelJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateTranscriptionRequestModelNullableJsonConverter), - 
typeof(global::OpenAI.JsonConverters.AudioResponseFormatJsonConverter), - typeof(global::OpenAI.JsonConverters.AudioResponseFormatNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateTranscriptionRequestTimestampGranularitieJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateTranscriptionRequestTimestampGranularitieNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateTranslationRequestModelJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateTranslationRequestModelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateRunRequestModelJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateRunRequestModelNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateSpeechRequestModelJsonConverter), typeof(global::OpenAI.JsonConverters.CreateSpeechRequestModelNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateSpeechRequestVoiceJsonConverter), typeof(global::OpenAI.JsonConverters.CreateSpeechRequestVoiceNullableJsonConverter), typeof(global::OpenAI.JsonConverters.CreateSpeechRequestResponseFormatJsonConverter), typeof(global::OpenAI.JsonConverters.CreateSpeechRequestResponseFormatNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.UploadStatusJsonConverter), - typeof(global::OpenAI.JsonConverters.UploadStatusNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.UploadObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.UploadObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.UploadPartObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.UploadPartObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantsApiResponseFormatOptionEnumJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantsApiResponseFormatOptionEnumNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantObjectObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantObjectObjectNullableJsonConverter), - 
typeof(global::OpenAI.JsonConverters.AssistantToolsCodeTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantToolsCodeTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantToolsFileSearchTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantToolsFileSearchTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.FileSearchRankingOptionsRankerJsonConverter), - typeof(global::OpenAI.JsonConverters.FileSearchRankingOptionsRankerNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantToolsFunctionTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantToolsFunctionTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateAssistantRequestModelJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateAssistantRequestModelNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1TypeJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1TypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2TypeJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2TypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.DeleteAssistantResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.DeleteAssistantResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantToolsFileSearchTypeOnlyTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantToolsFileSearchTypeOnlyTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.TruncationObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.TruncationObjectTypeNullableJsonConverter), - 
typeof(global::OpenAI.JsonConverters.AssistantsApiToolChoiceOptionEnumJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantsApiToolChoiceOptionEnumNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantsNamedToolChoiceTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantsNamedToolChoiceTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunObjectObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.RunObjectObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunObjectStatusJsonConverter), - typeof(global::OpenAI.JsonConverters.RunObjectStatusNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunObjectRequiredActionTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunObjectRequiredActionTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunToolCallObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunToolCallObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunObjectLastErrorCodeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunObjectLastErrorCodeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunObjectIncompleteDetailsReasonJsonConverter), - typeof(global::OpenAI.JsonConverters.RunObjectIncompleteDetailsReasonNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateRunRequestModelJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateRunRequestModelNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateMessageRequestRoleJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateMessageRequestRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentImageFileObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentImageFileObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentImageFileObjectImageFileDetailJsonConverter), - 
typeof(global::OpenAI.JsonConverters.MessageContentImageFileObjectImageFileDetailNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentImageUrlObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentImageUrlObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentImageUrlObjectImageUrlDetailJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentImageUrlObjectImageUrlDetailNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageRequestContentTextObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageRequestContentTextObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1TypeJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1TypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2TypeJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2TypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateThreadAndRunRequestModelJsonConverter), - typeof(global::OpenAI.JsonConverters.CreateThreadAndRunRequestModelNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ThreadObjectObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ThreadObjectObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.DeleteThreadResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.DeleteThreadResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageObjectObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageObjectObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageObjectStatusJsonConverter), - 
typeof(global::OpenAI.JsonConverters.MessageObjectStatusNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageObjectIncompleteDetailsReasonJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageObjectIncompleteDetailsReasonNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageObjectRoleJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageObjectRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentTextObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentTextObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentTextAnnotationsFileCitationObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentTextAnnotationsFileCitationObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentTextAnnotationsFilePathObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentTextAnnotationsFilePathObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentRefusalObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageContentRefusalObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaObjectObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaObjectObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaObjectDeltaRoleJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaObjectDeltaRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageFileObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageFileObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageFileObjectImageFileDetailJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageFileObjectImageFileDetailNullableJsonConverter), - 
typeof(global::OpenAI.JsonConverters.MessageDeltaContentTextObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentTextObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentTextAnnotationsFileCitationObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentTextAnnotationsFileCitationObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentTextAnnotationsFilePathObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentTextAnnotationsFilePathObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentRefusalObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentRefusalObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageUrlObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageUrlObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageUrlObjectImageUrlDetailJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageDeltaContentImageUrlObjectImageUrlDetailNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.DeleteMessageResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.DeleteMessageResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepObjectObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepObjectObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepObjectStatusJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepObjectStatusNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsMessageCreationObjectTypeJsonConverter), - 
typeof(global::OpenAI.JsonConverters.RunStepDetailsMessageCreationObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsCodeObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsCodeObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsCodeOutputLogsObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsCodeOutputLogsObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsCodeOutputImageObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsCodeOutputImageObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFileSearchObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFileSearchObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFunctionObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDetailsToolCallsFunctionObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepObjectLastErrorCodeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepObjectLastErrorCodeNullableJsonConverter), - 
typeof(global::OpenAI.JsonConverters.RunStepDeltaObjectObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaObjectObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsMessageCreationObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsMessageCreationObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsFileSearchObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsFileSearchObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsFunctionObjectTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepDeltaStepDetailsToolCallsFunctionObjectTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreExpirationAfterAnchorJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreExpirationAfterAnchorNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreObjectObjectJsonConverter), - 
typeof(global::OpenAI.JsonConverters.VectorStoreObjectObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreObjectStatusJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreObjectStatusNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.AutoChunkingStrategyRequestParamTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.AutoChunkingStrategyRequestParamTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.StaticChunkingStrategyRequestParamTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.StaticChunkingStrategyRequestParamTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.DeleteVectorStoreResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.DeleteVectorStoreResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreFileObjectObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreFileObjectObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreFileObjectStatusJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreFileObjectStatusNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreFileObjectLastErrorCodeJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreFileObjectLastErrorCodeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.StaticChunkingStrategyResponseParamTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.StaticChunkingStrategyResponseParamTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.OtherChunkingStrategyResponseParamTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.OtherChunkingStrategyResponseParamTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1TypeJsonConverter), + 
typeof(global::OpenAI.JsonConverters.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1TypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2TypeJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2TypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateThreadAndRunRequestModelJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateThreadAndRunRequestModelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateTranscriptionRequestModelJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateTranscriptionRequestModelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateTranscriptionRequestTimestampGranularitieJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateTranscriptionRequestTimestampGranularitieNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateTranslationRequestModelJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateTranslationRequestModelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateUploadRequestPurposeJsonConverter), + typeof(global::OpenAI.JsonConverters.CreateUploadRequestPurposeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreExpirationAfterAnchorJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreExpirationAfterAnchorNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.DeleteAssistantResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.DeleteAssistantResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.DeleteFileResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.DeleteFileResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.DeleteMessageResponseObjectJsonConverter), + 
typeof(global::OpenAI.JsonConverters.DeleteMessageResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.DeleteThreadResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.DeleteThreadResponseObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.DeleteVectorStoreFileResponseObjectJsonConverter), typeof(global::OpenAI.JsonConverters.DeleteVectorStoreFileResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreFileBatchObjectObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreFileBatchObjectObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreFileBatchObjectStatusJsonConverter), - typeof(global::OpenAI.JsonConverters.VectorStoreFileBatchObjectStatusNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ThreadStreamEventVariant1EventJsonConverter), - typeof(global::OpenAI.JsonConverters.ThreadStreamEventVariant1EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant1EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant1EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant2EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant2EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant3EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant3EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant4EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant4EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant5EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant5EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant6EventJsonConverter), - 
typeof(global::OpenAI.JsonConverters.RunStreamEventVariant6EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant7EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant7EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant8EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant8EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant9EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant9EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant10EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventVariant10EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant1EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant1EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant2EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant2EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant3EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant3EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant4EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant4EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant5EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant5EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant6EventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant6EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant7EventJsonConverter), - 
typeof(global::OpenAI.JsonConverters.RunStepStreamEventVariant7EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant1EventJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant1EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant2EventJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant2EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant3EventJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant3EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant4EventJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant4EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant5EventJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageStreamEventVariant5EventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ErrorEventEventJsonConverter), - typeof(global::OpenAI.JsonConverters.ErrorEventEventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.DoneEventEventJsonConverter), - typeof(global::OpenAI.JsonConverters.DoneEventEventNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.DoneEventDataJsonConverter), - typeof(global::OpenAI.JsonConverters.DoneEventDataNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.BatchObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.BatchObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.BatchStatusJsonConverter), - typeof(global::OpenAI.JsonConverters.BatchStatusNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.BatchRequestInputMethodJsonConverter), - typeof(global::OpenAI.JsonConverters.BatchRequestInputMethodNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ListBatchesResponseObjectJsonConverter), - 
typeof(global::OpenAI.JsonConverters.ListBatchesResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.AuditLogActorApiKeyTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.AuditLogActorApiKeyTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.AuditLogActorTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.AuditLogActorTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.AuditLogEventTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.AuditLogEventTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ListAuditLogsResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ListAuditLogsResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.DeleteVectorStoreResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.DeleteVectorStoreResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningIntegrationTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningIntegrationTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobHyperparametersNEpochsJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobHyperparametersNEpochsNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobStatusJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobStatusNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobCheckpointObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobCheckpointObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobEventLevelJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobEventLevelNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.FineTuningJobEventObjectJsonConverter), + 
typeof(global::OpenAI.JsonConverters.FineTuningJobEventObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.InviteObjectJsonConverter), typeof(global::OpenAI.JsonConverters.InviteObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.InviteRoleJsonConverter), typeof(global::OpenAI.JsonConverters.InviteRoleNullableJsonConverter), typeof(global::OpenAI.JsonConverters.InviteStatusJsonConverter), typeof(global::OpenAI.JsonConverters.InviteStatusNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.InviteDeleteResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.InviteDeleteResponseObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.InviteListResponseObjectJsonConverter), typeof(global::OpenAI.JsonConverters.InviteListResponseObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.InviteRequestRoleJsonConverter), typeof(global::OpenAI.JsonConverters.InviteRequestRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.InviteDeleteResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.InviteDeleteResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.UserObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.UserObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.UserRoleJsonConverter), - typeof(global::OpenAI.JsonConverters.UserRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.UserListResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.UserListResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.UserRoleUpdateRequestRoleJsonConverter), - typeof(global::OpenAI.JsonConverters.UserRoleUpdateRequestRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.UserDeleteResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.UserDeleteResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ListAuditLogsResponseObjectJsonConverter), + 
typeof(global::OpenAI.JsonConverters.ListAuditLogsResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ListBatchesResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ListBatchesResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.OpenAIFileObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.OpenAIFileObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.OpenAIFilePurposeJsonConverter), + typeof(global::OpenAI.JsonConverters.OpenAIFilePurposeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.OpenAIFileStatusJsonConverter), + typeof(global::OpenAI.JsonConverters.OpenAIFileStatusNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ListFilesResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ListFilesResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ListFineTuningJobCheckpointsResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ListFineTuningJobCheckpointsResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ListFineTuningJobEventsResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ListFineTuningJobEventsResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ListModelsResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ListModelsResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ModelObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ModelObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ListPaginatedFineTuningJobsResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ListPaginatedFineTuningJobsResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreFileObjectObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreFileObjectObjectNullableJsonConverter), + 
typeof(global::OpenAI.JsonConverters.VectorStoreFileObjectStatusJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreFileObjectStatusNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreFileObjectLastErrorCodeJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreFileObjectLastErrorCodeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.StaticChunkingStrategyResponseParamTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.StaticChunkingStrategyResponseParamTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.OtherChunkingStrategyResponseParamTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.OtherChunkingStrategyResponseParamTypeNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreObjectObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreObjectObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreObjectStatusJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreObjectStatusNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectObjectJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectStatusJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectStatusNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectListResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectListResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectApiKeyObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectApiKeyObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectApiKeyOwnerTypeJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectApiKeyOwnerTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectUserObjectJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectUserObjectNullableJsonConverter), 
typeof(global::OpenAI.JsonConverters.ProjectUserRoleJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectUserRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectUserCreateRequestRoleJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectUserCreateRequestRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectUserUpdateRequestRoleJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectUserUpdateRequestRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectUserDeleteResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectUserDeleteResponseObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectServiceAccountObjectJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectServiceAccountObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectServiceAccountRoleJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectServiceAccountRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectServiceAccountListResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectServiceAccountListResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectApiKeyDeleteResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectApiKeyDeleteResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectApiKeyListResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectApiKeyListResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectListResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectListResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectServiceAccountApiKeyObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectServiceAccountApiKeyObjectNullableJsonConverter), 
typeof(global::OpenAI.JsonConverters.ProjectServiceAccountCreateResponseObjectJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectServiceAccountCreateResponseObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectServiceAccountCreateResponseRoleJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectServiceAccountCreateResponseRoleNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectServiceAccountApiKeyObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectServiceAccountApiKeyObjectNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectServiceAccountDeleteResponseObjectJsonConverter), typeof(global::OpenAI.JsonConverters.ProjectServiceAccountDeleteResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectApiKeyObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectApiKeyObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectApiKeyOwnerTypeJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectApiKeyOwnerTypeNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectApiKeyListResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectApiKeyListResponseObjectNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectApiKeyDeleteResponseObjectJsonConverter), - typeof(global::OpenAI.JsonConverters.ProjectApiKeyDeleteResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectServiceAccountListResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectServiceAccountListResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectUserCreateRequestRoleJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectUserCreateRequestRoleNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectUserDeleteResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectUserDeleteResponseObjectNullableJsonConverter), 
+ typeof(global::OpenAI.JsonConverters.ProjectUserUpdateRequestRoleJsonConverter), + typeof(global::OpenAI.JsonConverters.ProjectUserUpdateRequestRoleNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokensJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeClientEventResponseCreateResponseMaxOutputTokensNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokensJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeClientEventSessionUpdateSessionMaxOutputTokensNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokensJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeServerEventSessionCreatedSessionMaxOutputTokensNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensJsonConverter), + typeof(global::OpenAI.JsonConverters.RealtimeServerEventSessionUpdatedSessionMaxOutputTokensNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.UploadStatusJsonConverter), + typeof(global::OpenAI.JsonConverters.UploadStatusNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.UploadObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.UploadObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.UploadPartObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.UploadPartObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.UserObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.UserObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.UserRoleJsonConverter), + typeof(global::OpenAI.JsonConverters.UserRoleNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.UserDeleteResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.UserDeleteResponseObjectNullableJsonConverter), + 
typeof(global::OpenAI.JsonConverters.UserListResponseObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.UserListResponseObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.UserRoleUpdateRequestRoleJsonConverter), + typeof(global::OpenAI.JsonConverters.UserRoleUpdateRequestRoleNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreFileBatchObjectObjectJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreFileBatchObjectObjectNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreFileBatchObjectStatusJsonConverter), + typeof(global::OpenAI.JsonConverters.VectorStoreFileBatchObjectStatusNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventTypeJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventTypeNullableJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeAudioFormatJsonConverter), @@ -647,35 +665,35 @@ namespace OpenAI typeof(global::OpenAI.JsonConverters.GetRunStepIncludeItemNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ListVectorStoresOrderJsonConverter), typeof(global::OpenAI.JsonConverters.ListVectorStoresOrderNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ListVectorStoreFilesOrderJsonConverter), - typeof(global::OpenAI.JsonConverters.ListVectorStoreFilesOrderNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.ListVectorStoreFilesFilterJsonConverter), - typeof(global::OpenAI.JsonConverters.ListVectorStoreFilesFilterNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ListFilesInVectorStoreBatchOrderJsonConverter), typeof(global::OpenAI.JsonConverters.ListFilesInVectorStoreBatchOrderNullableJsonConverter), typeof(global::OpenAI.JsonConverters.ListFilesInVectorStoreBatchFilterJsonConverter), typeof(global::OpenAI.JsonConverters.ListFilesInVectorStoreBatchFilterNullableJsonConverter), - typeof(global::OpenAI.JsonConverters.AnyOfJsonConverterFactory2), - 
typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory4), + typeof(global::OpenAI.JsonConverters.ListVectorStoreFilesOrderJsonConverter), + typeof(global::OpenAI.JsonConverters.ListVectorStoreFilesOrderNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.ListVectorStoreFilesFilterJsonConverter), + typeof(global::OpenAI.JsonConverters.ListVectorStoreFilesFilterNullableJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory3), + typeof(global::OpenAI.JsonConverters.AssistantsApiResponseFormatOptionJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantStreamEventJsonConverter), + typeof(global::OpenAI.JsonConverters.ThreadStreamEventJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStreamEventJsonConverter), + typeof(global::OpenAI.JsonConverters.AssistantsApiToolChoiceOptionJsonConverter), + typeof(global::OpenAI.JsonConverters.RunStepStreamEventJsonConverter), typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory2), + typeof(global::OpenAI.JsonConverters.MessageStreamEventJsonConverter), + typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory4), + typeof(global::OpenAI.JsonConverters.ChatCompletionRequestAssistantMessageContentPartJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestSystemMessageContentPartJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestUserMessageContentPartJsonConverter), - typeof(global::OpenAI.JsonConverters.ChatCompletionRequestAssistantMessageContentPartJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionRequestToolMessageContentPartJsonConverter), - typeof(global::OpenAI.JsonConverters.FineTuneChatCompletionRequestAssistantMessageJsonConverter), typeof(global::OpenAI.JsonConverters.ChatCompletionToolChoiceOptionJsonConverter), - typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory3), + 
typeof(global::OpenAI.JsonConverters.ChunkingStrategyRequestParamJsonConverter), + typeof(global::OpenAI.JsonConverters.AnyOfJsonConverterFactory2), + typeof(global::OpenAI.JsonConverters.FineTuneChatCompletionRequestAssistantMessageJsonConverter), typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory1), typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory5), - typeof(global::OpenAI.JsonConverters.AssistantsApiResponseFormatOptionJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantsApiToolChoiceOptionJsonConverter), - typeof(global::OpenAI.JsonConverters.ChunkingStrategyRequestParamJsonConverter), - typeof(global::OpenAI.JsonConverters.AssistantStreamEventJsonConverter), - typeof(global::OpenAI.JsonConverters.ThreadStreamEventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStreamEventJsonConverter), - typeof(global::OpenAI.JsonConverters.RunStepStreamEventJsonConverter), - typeof(global::OpenAI.JsonConverters.MessageStreamEventJsonConverter), typeof(global::OpenAI.JsonConverters.RealtimeServerEventJsonConverter), })] diff --git a/src/libs/OpenAI/Generated/JsonSerializerContextTypes.g.cs b/src/libs/OpenAI/Generated/JsonSerializerContextTypes.g.cs index 97242074..5bf5a38d 100644 --- a/src/libs/OpenAI/Generated/JsonSerializerContextTypes.g.cs +++ b/src/libs/OpenAI/Generated/JsonSerializerContextTypes.g.cs @@ -18,3610 +18,3998 @@ public sealed partial class JsonSerializerContextTypes /// /// /// - public global::OpenAI.Error? Type0 { get; set; } + public global::OpenAI.AddUploadPartRequest? Type0 { get; set; } /// /// /// - public string? Type1 { get; set; } + public byte[]? Type1 { get; set; } /// /// /// - public global::OpenAI.ErrorResponse? Type2 { get; set; } + public global::OpenAI.AssistantObject? Type2 { get; set; } /// /// /// - public global::OpenAI.ListModelsResponse? Type3 { get; set; } + public string? Type3 { get; set; } /// /// /// - public global::OpenAI.ListModelsResponseObject? 
Type4 { get; set; } + public global::OpenAI.AssistantObjectObject? Type4 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type5 { get; set; } + public global::System.DateTimeOffset? Type5 { get; set; } /// /// /// - public global::OpenAI.Model12? Type6 { get; set; } + public global::System.Collections.Generic.IList>? Type6 { get; set; } /// /// /// - public global::System.DateTimeOffset? Type7 { get; set; } + public global::OpenAI.OneOf? Type7 { get; set; } /// /// /// - public global::OpenAI.ModelObject? Type8 { get; set; } + public global::OpenAI.AssistantToolsCode? Type8 { get; set; } /// /// /// - public global::OpenAI.DeleteModelResponse? Type9 { get; set; } + public global::OpenAI.AssistantToolsCodeType? Type9 { get; set; } /// /// /// - public bool? Type10 { get; set; } + public global::OpenAI.AssistantToolsFileSearch? Type10 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionRequest? Type11 { get; set; } + public global::OpenAI.AssistantToolsFileSearchType? Type11 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type12 { get; set; } + public global::OpenAI.AssistantToolsFileSearchFileSearch? Type12 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionRequestModel? Type13 { get; set; } + public int? Type13 { get; set; } /// /// /// - public global::OpenAI.OneOf, global::System.Collections.Generic.IList, global::System.Collections.Generic.IList>>? Type14 { get; set; } + public global::OpenAI.FileSearchRankingOptions? Type14 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type15 { get; set; } + public global::OpenAI.FileSearchRankingOptionsRanker? Type15 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type16 { get; set; } + public double? Type16 { get; set; } /// /// /// - public int? Type17 { get; set; } + public global::OpenAI.AssistantToolsFunction? Type17 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? 
Type18 { get; set; } + public global::OpenAI.AssistantToolsFunctionType? Type18 { get; set; } /// /// /// - public double? Type19 { get; set; } + public global::OpenAI.FunctionObject? Type19 { get; set; } /// /// /// - public global::System.Collections.Generic.Dictionary? Type20 { get; set; } + public object? Type20 { get; set; } /// /// /// - public global::OpenAI.OneOf>? Type21 { get; set; } + public bool? Type21 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionStreamOptions? Type22 { get; set; } + public global::OpenAI.AssistantObjectToolResources? Type22 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponse? Type23 { get; set; } + public global::OpenAI.AssistantObjectToolResourcesCodeInterpreter? Type23 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type24 { get; set; } + public global::System.Collections.Generic.IList? Type24 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponseChoice? Type25 { get; set; } + public global::OpenAI.AssistantObjectToolResourcesFileSearch? Type25 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponseChoiceFinishReason? Type26 { get; set; } + public global::OpenAI.AssistantsApiResponseFormatOption? Type26 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponseChoiceLogprobs? Type27 { get; set; } + public global::OpenAI.AssistantsApiResponseFormatOptionEnum? Type27 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type28 { get; set; } + public global::OpenAI.ResponseFormatText? Type28 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type29 { get; set; } + public global::OpenAI.ResponseFormatTextType? Type29 { get; set; } /// /// /// - public global::System.Collections.Generic.Dictionary? Type30 { get; set; } + public global::OpenAI.ResponseFormatJsonObject? Type30 { get; set; } /// /// /// - public global::OpenAI.CreateCompletionResponseObject? 
Type31 { get; set; } + public global::OpenAI.ResponseFormatJsonObjectType? Type31 { get; set; } /// /// /// - public global::OpenAI.CompletionUsage? Type32 { get; set; } + public global::OpenAI.ResponseFormatJsonSchema? Type32 { get; set; } /// /// /// - public global::OpenAI.CompletionUsageCompletionTokensDetails? Type33 { get; set; } + public global::OpenAI.ResponseFormatJsonSchemaType? Type33 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartText? Type34 { get; set; } + public global::OpenAI.ResponseFormatJsonSchemaJsonSchema? Type34 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartTextType? Type35 { get; set; } + public global::OpenAI.AssistantStreamEvent? Type35 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartImage? Type36 { get; set; } + public global::OpenAI.ThreadStreamEvent? Type36 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartImageType? Type37 { get; set; } + public global::OpenAI.ThreadStreamEventVariant1? Type37 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartImageImageUrl? Type38 { get; set; } + public global::OpenAI.ThreadStreamEventVariant1Event? Type38 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartImageImageUrlDetail? Type39 { get; set; } + public global::OpenAI.ThreadObject? Type39 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartRefusal? Type40 { get; set; } + public global::OpenAI.ThreadObjectObject? Type40 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessageContentPartRefusalType? Type41 { get; set; } + public global::OpenAI.ThreadObjectToolResources? Type41 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestMessage? Type42 { get; set; } + public global::OpenAI.ThreadObjectToolResourcesCodeInterpreter? 
Type42 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestSystemMessage? Type43 { get; set; } + public global::OpenAI.ThreadObjectToolResourcesFileSearch? Type43 { get; set; } /// /// /// - public global::OpenAI.OneOf>? Type44 { get; set; } + public global::OpenAI.RunStreamEvent? Type44 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type45 { get; set; } + public global::OpenAI.RunStreamEventVariant1? Type45 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestSystemMessageContentPart? Type46 { get; set; } + public global::OpenAI.RunStreamEventVariant1Event? Type46 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestSystemMessageRole? Type47 { get; set; } + public global::OpenAI.RunObject? Type47 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestUserMessage? Type48 { get; set; } + public global::OpenAI.RunObjectObject? Type48 { get; set; } /// /// /// - public global::OpenAI.OneOf>? Type49 { get; set; } + public global::OpenAI.RunObjectStatus? Type49 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type50 { get; set; } + public global::OpenAI.RunObjectRequiredAction? Type50 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestUserMessageContentPart? Type51 { get; set; } + public global::OpenAI.RunObjectRequiredActionType? Type51 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestUserMessageRole? Type52 { get; set; } + public global::OpenAI.RunObjectRequiredActionSubmitToolOutputs? Type52 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestAssistantMessage? Type53 { get; set; } + public global::System.Collections.Generic.IList? Type53 { get; set; } /// /// /// - public global::OpenAI.OneOf>? Type54 { get; set; } + public global::OpenAI.RunToolCallObject? Type54 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? 
Type55 { get; set; } + public global::OpenAI.RunToolCallObjectType? Type55 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestAssistantMessageContentPart? Type56 { get; set; } + public global::OpenAI.RunToolCallObjectFunction? Type56 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestAssistantMessageRole? Type57 { get; set; } + public global::OpenAI.RunObjectLastError? Type57 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type58 { get; set; } + public global::OpenAI.RunObjectLastErrorCode? Type58 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionMessageToolCall? Type59 { get; set; } + public global::OpenAI.RunObjectIncompleteDetails? Type59 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionMessageToolCallType? Type60 { get; set; } + public global::OpenAI.RunObjectIncompleteDetailsReason? Type60 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionMessageToolCallFunction? Type61 { get; set; } + public global::OpenAI.RunCompletionUsage? Type61 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestAssistantMessageFunctionCall? Type62 { get; set; } + public global::OpenAI.TruncationObject? Type62 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestToolMessage? Type63 { get; set; } + public global::OpenAI.TruncationObjectType? Type63 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestToolMessageRole? Type64 { get; set; } + public global::OpenAI.AssistantsApiToolChoiceOption? Type64 { get; set; } /// /// /// - public global::OpenAI.OneOf>? Type65 { get; set; } + public global::OpenAI.AssistantsApiToolChoiceOptionEnum? Type65 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type66 { get; set; } + public global::OpenAI.AssistantsNamedToolChoice? Type66 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestToolMessageContentPart? 
Type67 { get; set; } + public global::OpenAI.AssistantsNamedToolChoiceType? Type67 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestFunctionMessage? Type68 { get; set; } + public global::OpenAI.AssistantsNamedToolChoiceFunction? Type68 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRequestFunctionMessageRole? Type69 { get; set; } + public global::OpenAI.RunStreamEventVariant2? Type69 { get; set; } /// /// /// - public global::OpenAI.FineTuneChatCompletionRequestAssistantMessage? Type70 { get; set; } + public global::OpenAI.RunStreamEventVariant2Event? Type70 { get; set; } /// /// /// - public global::OpenAI.FineTuneChatCompletionRequestAssistantMessageVariant1? Type71 { get; set; } + public global::OpenAI.RunStreamEventVariant3? Type71 { get; set; } /// /// /// - public object? Type72 { get; set; } + public global::OpenAI.RunStreamEventVariant3Event? Type72 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionFunctions? Type73 { get; set; } + public global::OpenAI.RunStreamEventVariant4? Type73 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionFunctionCallOption? Type74 { get; set; } + public global::OpenAI.RunStreamEventVariant4Event? Type74 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionTool? Type75 { get; set; } + public global::OpenAI.RunStreamEventVariant5? Type75 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionToolType? Type76 { get; set; } + public global::OpenAI.RunStreamEventVariant5Event? Type76 { get; set; } /// /// /// - public global::OpenAI.FunctionObject? Type77 { get; set; } + public global::OpenAI.RunStreamEventVariant6? Type77 { get; set; } /// /// /// - public global::OpenAI.ResponseFormatText? Type78 { get; set; } + public global::OpenAI.RunStreamEventVariant6Event? Type78 { get; set; } /// /// /// - public global::OpenAI.ResponseFormatTextType? Type79 { get; set; } + public global::OpenAI.RunStreamEventVariant7? 
Type79 { get; set; } /// /// /// - public global::OpenAI.ResponseFormatJsonObject? Type80 { get; set; } + public global::OpenAI.RunStreamEventVariant7Event? Type80 { get; set; } /// /// /// - public global::OpenAI.ResponseFormatJsonObjectType? Type81 { get; set; } + public global::OpenAI.RunStreamEventVariant8? Type81 { get; set; } /// /// /// - public global::OpenAI.ResponseFormatJsonSchema? Type82 { get; set; } + public global::OpenAI.RunStreamEventVariant8Event? Type82 { get; set; } /// /// /// - public global::OpenAI.ResponseFormatJsonSchemaType? Type83 { get; set; } + public global::OpenAI.RunStreamEventVariant9? Type83 { get; set; } /// /// /// - public global::OpenAI.ResponseFormatJsonSchemaJsonSchema? Type84 { get; set; } + public global::OpenAI.RunStreamEventVariant9Event? Type84 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionToolChoiceOption? Type85 { get; set; } + public global::OpenAI.RunStreamEventVariant10? Type85 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionToolChoiceOptionEnum? Type86 { get; set; } + public global::OpenAI.RunStreamEventVariant10Event? Type86 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionNamedToolChoice? Type87 { get; set; } + public global::OpenAI.RunStepStreamEvent? Type87 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionNamedToolChoiceType? Type88 { get; set; } + public global::OpenAI.RunStepStreamEventVariant1? Type88 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionNamedToolChoiceFunction? Type89 { get; set; } + public global::OpenAI.RunStepStreamEventVariant1Event? Type89 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionMessageToolCallChunk? Type90 { get; set; } + public global::OpenAI.RunStepObject? Type90 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionMessageToolCallChunkType? Type91 { get; set; } + public global::OpenAI.RunStepObjectObject? 
Type91 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionMessageToolCallChunkFunction? Type92 { get; set; } + public global::OpenAI.RunStepObjectType? Type92 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionRole? Type93 { get; set; } + public global::OpenAI.RunStepObjectStatus? Type93 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionResponseMessage? Type94 { get; set; } + public global::OpenAI.OneOf? Type94 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionResponseMessageRole? Type95 { get; set; } + public global::OpenAI.RunStepDetailsMessageCreationObject? Type95 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionResponseMessageFunctionCall? Type96 { get; set; } + public global::OpenAI.RunStepDetailsMessageCreationObjectType? Type96 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionStreamResponseDelta? Type97 { get; set; } + public global::OpenAI.RunStepDetailsMessageCreationObjectMessageCreation? Type97 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionStreamResponseDeltaFunctionCall? Type98 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsObject? Type98 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type99 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsObjectType? Type99 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionStreamResponseDeltaRole? Type100 { get; set; } + public global::System.Collections.Generic.IList>? Type100 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequest? Type101 { get; set; } + public global::OpenAI.OneOf? Type101 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type102 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsCodeObject? Type102 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type103 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsCodeObjectType? 
Type103 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestModel? Type104 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsCodeObjectCodeInterpreter? Type104 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type105 { get; set; } + public global::System.Collections.Generic.IList>? Type105 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestServiceTier? Type106 { get; set; } + public global::OpenAI.OneOf? Type106 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type107 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsCodeOutputLogsObject? Type107 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type108 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsCodeOutputLogsObjectType? Type108 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionRequestFunctionCall? Type109 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsCodeOutputImageObject? Type109 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type110 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsCodeOutputImageObjectType? Type110 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponse? Type111 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsCodeOutputImageObjectImage? Type111 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type112 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsFileSearchObject? Type112 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseChoice? Type113 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsFileSearchObjectType? Type113 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseChoiceFinishReason? Type114 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsFileSearchObjectFileSearch? 
Type114 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseChoiceLogprobs? Type115 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsFileSearchRankingOptionsObject? Type115 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type116 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker? Type116 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionTokenLogprob? Type117 { get; set; } + public global::System.Collections.Generic.IList? Type117 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type118 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsFileSearchResultObject? Type118 { get; set; } /// /// /// - public global::OpenAI.ChatCompletionTokenLogprobTopLogprob? Type119 { get; set; } + public global::System.Collections.Generic.IList? Type119 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseServiceTier? Type120 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsFileSearchResultObjectContentItem? Type120 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionResponseObject? Type121 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsFileSearchResultObjectContentItemType? Type121 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionFunctionResponse? Type122 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsFunctionObject? Type122 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type123 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsFunctionObjectType? Type123 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionFunctionResponseChoice? Type124 { get; set; } + public global::OpenAI.RunStepDetailsToolCallsFunctionObjectFunction? Type124 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionFunctionResponseChoiceFinishReason? 
Type125 { get; set; } + public global::OpenAI.RunStepObjectLastError? Type125 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionFunctionResponseObject? Type126 { get; set; } + public global::OpenAI.RunStepObjectLastErrorCode? Type126 { get; set; } /// /// /// - public global::OpenAI.ListPaginatedFineTuningJobsResponse? Type127 { get; set; } + public global::OpenAI.RunStepCompletionUsage? Type127 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type128 { get; set; } + public global::OpenAI.RunStepStreamEventVariant2? Type128 { get; set; } /// /// /// - public global::OpenAI.FineTuningJob? Type129 { get; set; } + public global::OpenAI.RunStepStreamEventVariant2Event? Type129 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobError? Type130 { get; set; } + public global::OpenAI.RunStepStreamEventVariant3? Type130 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobHyperparameters? Type131 { get; set; } + public global::OpenAI.RunStepStreamEventVariant3Event? Type131 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type132 { get; set; } + public global::OpenAI.RunStepDeltaObject? Type132 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobHyperparametersNEpochs? Type133 { get; set; } + public global::OpenAI.RunStepDeltaObjectObject? Type133 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobObject? Type134 { get; set; } + public global::OpenAI.RunStepDeltaObjectDelta? Type134 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobStatus? Type135 { get; set; } + public global::OpenAI.OneOf? Type135 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type136 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsMessageCreationObject? Type136 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type137 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsMessageCreationObjectType? 
Type137 { get; set; } /// /// /// - public global::OpenAI.FineTuningIntegration? Type138 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsMessageCreationObjectMessageCreation? Type138 { get; set; } /// /// /// - public global::OpenAI.FineTuningIntegrationType? Type139 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsObject? Type139 { get; set; } /// /// /// - public global::OpenAI.FineTuningIntegrationWandb? Type140 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsObjectType? Type140 { get; set; } /// /// /// - public global::OpenAI.ListPaginatedFineTuningJobsResponseObject? Type141 { get; set; } + public global::System.Collections.Generic.IList>? Type141 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponse? Type142 { get; set; } + public global::OpenAI.OneOf? Type142 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type143 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeObject? Type143 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseChoice? Type144 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeObjectType? Type144 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseChoiceLogprobs? Type145 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? Type145 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseChoiceFinishReason? Type146 { get; set; } + public global::System.Collections.Generic.IList>? Type146 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseServiceTier? Type147 { get; set; } + public global::OpenAI.OneOf? Type147 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseObject? Type148 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject? 
Type148 { get; set; } /// /// /// - public global::OpenAI.CreateChatCompletionStreamResponseUsage? Type149 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectType? Type149 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequest? Type150 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeOutputImageObject? Type150 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type151 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectType? Type151 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestModel? Type152 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImage? Type152 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestQuality? Type153 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsFileSearchObject? Type153 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestResponseFormat? Type154 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsFileSearchObjectType? Type154 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestSize? Type155 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsFunctionObject? Type155 { get; set; } /// /// /// - public global::OpenAI.CreateImageRequestStyle? Type156 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsFunctionObjectType? Type156 { get; set; } /// /// /// - public global::OpenAI.ImagesResponse? Type157 { get; set; } + public global::OpenAI.RunStepDeltaStepDetailsToolCallsFunctionObjectFunction? Type157 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type158 { get; set; } + public global::OpenAI.RunStepStreamEventVariant4? Type158 { get; set; } /// /// /// - public global::OpenAI.Image? Type159 { get; set; } + public global::OpenAI.RunStepStreamEventVariant4Event? 
Type159 { get; set; } /// /// /// - public global::OpenAI.CreateImageEditRequest? Type160 { get; set; } + public global::OpenAI.RunStepStreamEventVariant5? Type160 { get; set; } /// /// /// - public byte[]? Type161 { get; set; } + public global::OpenAI.RunStepStreamEventVariant5Event? Type161 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type162 { get; set; } + public global::OpenAI.RunStepStreamEventVariant6? Type162 { get; set; } /// /// /// - public global::OpenAI.CreateImageEditRequestModel? Type163 { get; set; } + public global::OpenAI.RunStepStreamEventVariant6Event? Type163 { get; set; } /// /// /// - public global::OpenAI.CreateImageEditRequestSize? Type164 { get; set; } + public global::OpenAI.RunStepStreamEventVariant7? Type164 { get; set; } /// /// /// - public global::OpenAI.CreateImageEditRequestResponseFormat? Type165 { get; set; } + public global::OpenAI.RunStepStreamEventVariant7Event? Type165 { get; set; } /// /// /// - public global::OpenAI.CreateImageVariationRequest? Type166 { get; set; } + public global::OpenAI.MessageStreamEvent? Type166 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type167 { get; set; } + public global::OpenAI.MessageStreamEventVariant1? Type167 { get; set; } /// /// /// - public global::OpenAI.CreateImageVariationRequestModel? Type168 { get; set; } + public global::OpenAI.MessageStreamEventVariant1Event? Type168 { get; set; } /// /// /// - public global::OpenAI.CreateImageVariationRequestResponseFormat? Type169 { get; set; } + public global::OpenAI.MessageObject? Type169 { get; set; } /// /// /// - public global::OpenAI.CreateImageVariationRequestSize? Type170 { get; set; } + public global::OpenAI.MessageObjectObject? Type170 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequest? Type171 { get; set; } + public global::OpenAI.MessageObjectStatus? Type171 { get; set; } /// /// /// - public global::OpenAI.OneOf, global::System.Collections.Generic.IList>>? 
Type172 { get; set; } + public global::OpenAI.MessageObjectIncompleteDetails? Type172 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type173 { get; set; } + public global::OpenAI.MessageObjectIncompleteDetailsReason? Type173 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type174 { get; set; } + public global::OpenAI.MessageObjectRole? Type174 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1? Type175 { get; set; } + public global::System.Collections.Generic.IList>? Type175 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1Type? Type176 { get; set; } + public global::OpenAI.OneOf? Type176 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1ImageUrl? Type177 { get; set; } + public global::OpenAI.MessageContentImageFileObject? Type177 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant2? Type178 { get; set; } + public global::OpenAI.MessageContentImageFileObjectType? Type178 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant2Type? Type179 { get; set; } + public global::OpenAI.MessageContentImageFileObjectImageFile? Type179 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type180 { get; set; } + public global::OpenAI.MessageContentImageFileObjectImageFileDetail? Type180 { get; set; } /// /// /// - public global::OpenAI.CreateModerationRequestModel? Type181 { get; set; } + public global::OpenAI.MessageContentImageUrlObject? Type181 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponse? Type182 { get; set; } + public global::OpenAI.MessageContentImageUrlObjectType? Type182 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type183 { get; set; } + public global::OpenAI.MessageContentImageUrlObjectImageUrl? 
Type183 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResult? Type184 { get; set; } + public global::OpenAI.MessageContentImageUrlObjectImageUrlDetail? Type184 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategories? Type185 { get; set; } + public global::OpenAI.MessageContentTextObject? Type185 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryScores? Type186 { get; set; } + public global::OpenAI.MessageContentTextObjectType? Type186 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypes? Type187 { get; set; } + public global::OpenAI.MessageContentTextObjectText? Type187 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type188 { get; set; } + public global::System.Collections.Generic.IList>? Type188 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHateItem? Type189 { get; set; } + public global::OpenAI.OneOf? Type189 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type190 { get; set; } + public global::OpenAI.MessageContentTextAnnotationsFileCitationObject? Type190 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem? Type191 { get; set; } + public global::OpenAI.MessageContentTextAnnotationsFileCitationObjectType? Type191 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type192 { get; set; } + public global::OpenAI.MessageContentTextAnnotationsFileCitationObjectFileCitation? Type192 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem? Type193 { get; set; } + public global::OpenAI.MessageContentTextAnnotationsFilePathObject? Type193 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? 
Type194 { get; set; } + public global::OpenAI.MessageContentTextAnnotationsFilePathObjectType? Type194 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem? Type195 { get; set; } + public global::OpenAI.MessageContentTextAnnotationsFilePathObjectFilePath? Type195 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type196 { get; set; } + public global::OpenAI.MessageContentRefusalObject? Type196 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem? Type197 { get; set; } + public global::OpenAI.MessageContentRefusalObjectType? Type197 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type198 { get; set; } + public global::System.Collections.Generic.IList? Type198 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem? Type199 { get; set; } + public global::OpenAI.MessageObjectAttachment? Type199 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type200 { get; set; } + public global::System.Collections.Generic.IList>? Type200 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem? Type201 { get; set; } + public global::OpenAI.OneOf? Type201 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type202 { get; set; } + public global::OpenAI.AssistantToolsFileSearchTypeOnly? Type202 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem? Type203 { get; set; } + public global::OpenAI.AssistantToolsFileSearchTypeOnlyType? Type203 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type204 { get; set; } + public global::OpenAI.MessageStreamEventVariant2? 
Type204 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction? Type205 { get; set; } + public global::OpenAI.MessageStreamEventVariant2Event? Type205 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type206 { get; set; } + public global::OpenAI.MessageStreamEventVariant3? Type206 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem? Type207 { get; set; } + public global::OpenAI.MessageStreamEventVariant3Event? Type207 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type208 { get; set; } + public global::OpenAI.MessageDeltaObject? Type208 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor? Type209 { get; set; } + public global::OpenAI.MessageDeltaObjectObject? Type209 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type210 { get; set; } + public global::OpenAI.MessageDeltaObjectDelta? Type210 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem? Type211 { get; set; } + public global::OpenAI.MessageDeltaObjectDeltaRole? Type211 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type212 { get; set; } + public global::System.Collections.Generic.IList>? Type212 { get; set; } /// /// /// - public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem? Type213 { get; set; } + public global::OpenAI.OneOf? Type213 { get; set; } /// /// /// - public global::OpenAI.ListFilesResponse? Type214 { get; set; } + public global::OpenAI.MessageDeltaContentImageFileObject? Type214 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type215 { get; set; } + public global::OpenAI.MessageDeltaContentImageFileObjectType? 
Type215 { get; set; } /// /// /// - public global::OpenAI.OpenAIFile? Type216 { get; set; } + public global::OpenAI.MessageDeltaContentImageFileObjectImageFile? Type216 { get; set; } /// /// /// - public global::OpenAI.OpenAIFileObject? Type217 { get; set; } + public global::OpenAI.MessageDeltaContentImageFileObjectImageFileDetail? Type217 { get; set; } /// /// /// - public global::OpenAI.OpenAIFilePurpose? Type218 { get; set; } + public global::OpenAI.MessageDeltaContentTextObject? Type218 { get; set; } /// /// /// - public global::OpenAI.OpenAIFileStatus? Type219 { get; set; } + public global::OpenAI.MessageDeltaContentTextObjectType? Type219 { get; set; } /// /// /// - public global::OpenAI.ListFilesResponseObject? Type220 { get; set; } + public global::OpenAI.MessageDeltaContentTextObjectText? Type220 { get; set; } /// /// /// - public global::OpenAI.CreateFileRequest? Type221 { get; set; } + public global::System.Collections.Generic.IList>? Type221 { get; set; } /// /// /// - public global::OpenAI.CreateFileRequestPurpose? Type222 { get; set; } + public global::OpenAI.OneOf? Type222 { get; set; } /// /// /// - public global::OpenAI.DeleteFileResponse? Type223 { get; set; } + public global::OpenAI.MessageDeltaContentTextAnnotationsFileCitationObject? Type223 { get; set; } /// /// /// - public global::OpenAI.DeleteFileResponseObject? Type224 { get; set; } + public global::OpenAI.MessageDeltaContentTextAnnotationsFileCitationObjectType? Type224 { get; set; } /// /// /// - public global::OpenAI.CreateUploadRequest? Type225 { get; set; } + public global::OpenAI.MessageDeltaContentTextAnnotationsFileCitationObjectFileCitation? Type225 { get; set; } /// /// /// - public global::OpenAI.CreateUploadRequestPurpose? Type226 { get; set; } + public global::OpenAI.MessageDeltaContentTextAnnotationsFilePathObject? Type226 { get; set; } /// /// /// - public global::OpenAI.AddUploadPartRequest? 
Type227 { get; set; } + public global::OpenAI.MessageDeltaContentTextAnnotationsFilePathObjectType? Type227 { get; set; } /// /// /// - public global::OpenAI.CompleteUploadRequest? Type228 { get; set; } + public global::OpenAI.MessageDeltaContentTextAnnotationsFilePathObjectFilePath? Type228 { get; set; } /// /// /// - public global::OpenAI.CancelUploadRequest? Type229 { get; set; } + public global::OpenAI.MessageDeltaContentRefusalObject? Type229 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequest? Type230 { get; set; } + public global::OpenAI.MessageDeltaContentRefusalObjectType? Type230 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type231 { get; set; } + public global::OpenAI.MessageDeltaContentImageUrlObject? Type231 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestModel? Type232 { get; set; } + public global::OpenAI.MessageDeltaContentImageUrlObjectType? Type232 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestHyperparameters? Type233 { get; set; } + public global::OpenAI.MessageDeltaContentImageUrlObjectImageUrl? Type233 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type234 { get; set; } + public global::OpenAI.MessageDeltaContentImageUrlObjectImageUrlDetail? Type234 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestHyperparametersBatchSize? Type235 { get; set; } + public global::OpenAI.MessageStreamEventVariant4? Type235 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type236 { get; set; } + public global::OpenAI.MessageStreamEventVariant4Event? Type236 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestHyperparametersLearningRateMultiplier? Type237 { get; set; } + public global::OpenAI.MessageStreamEventVariant5? Type237 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type238 { get; set; } + public global::OpenAI.MessageStreamEventVariant5Event? 
Type238 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestHyperparametersNEpochs? Type239 { get; set; } + public global::OpenAI.ErrorEvent? Type239 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type240 { get; set; } + public global::OpenAI.ErrorEventEvent? Type240 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestIntegration? Type241 { get; set; } + public global::OpenAI.Error? Type241 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestIntegrationType? Type242 { get; set; } + public global::OpenAI.DoneEvent? Type242 { get; set; } /// /// /// - public global::OpenAI.CreateFineTuningJobRequestIntegrationWandb? Type243 { get; set; } + public global::OpenAI.DoneEventEvent? Type243 { get; set; } /// /// /// - public global::OpenAI.ListFineTuningJobEventsResponse? Type244 { get; set; } + public global::OpenAI.DoneEventData? Type244 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type245 { get; set; } + public global::OpenAI.AudioResponseFormat? Type245 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobEvent? Type246 { get; set; } + public global::OpenAI.AuditLog? Type246 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobEventLevel? Type247 { get; set; } + public global::OpenAI.AuditLogEventType? Type247 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobEventObject? Type248 { get; set; } + public global::OpenAI.AuditLogProject? Type248 { get; set; } /// /// /// - public global::OpenAI.ListFineTuningJobEventsResponseObject? Type249 { get; set; } + public global::OpenAI.AuditLogActor? Type249 { get; set; } /// /// /// - public global::OpenAI.ListFineTuningJobCheckpointsResponse? Type250 { get; set; } + public global::OpenAI.AuditLogActorType? Type250 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type251 { get; set; } + public global::OpenAI.AuditLogActorSession? 
Type251 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobCheckpoint? Type252 { get; set; } + public global::OpenAI.AuditLogActorUser? Type252 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobCheckpointMetrics? Type253 { get; set; } + public global::OpenAI.AuditLogActorApiKey? Type253 { get; set; } /// /// /// - public global::OpenAI.FineTuningJobCheckpointObject? Type254 { get; set; } + public global::OpenAI.AuditLogActorApiKeyType? Type254 { get; set; } /// /// /// - public global::OpenAI.ListFineTuningJobCheckpointsResponseObject? Type255 { get; set; } + public global::OpenAI.AuditLogActorServiceAccount? Type255 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingRequest? Type256 { get; set; } + public global::OpenAI.AuditLogApiKeyCreated? Type256 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type257 { get; set; } + public global::OpenAI.AuditLogApiKeyCreatedData? Type257 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingRequestModel? Type258 { get; set; } + public global::OpenAI.AuditLogApiKeyUpdated? Type258 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingRequestEncodingFormat? Type259 { get; set; } + public global::OpenAI.AuditLogApiKeyUpdatedChangesRequested? Type259 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingResponse? Type260 { get; set; } + public global::OpenAI.AuditLogApiKeyDeleted? Type260 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type261 { get; set; } + public global::OpenAI.AuditLogInviteSent? Type261 { get; set; } /// /// /// - public global::OpenAI.Embedding? Type262 { get; set; } + public global::OpenAI.AuditLogInviteSentData? Type262 { get; set; } /// /// /// - public global::OpenAI.EmbeddingObject? Type263 { get; set; } + public global::OpenAI.AuditLogInviteAccepted? Type263 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingResponseObject? 
Type264 { get; set; } + public global::OpenAI.AuditLogInviteDeleted? Type264 { get; set; } /// /// /// - public global::OpenAI.CreateEmbeddingResponseUsage? Type265 { get; set; } + public global::OpenAI.AuditLogLoginFailed? Type265 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionRequest? Type266 { get; set; } + public global::OpenAI.AuditLogLogoutFailed? Type266 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type267 { get; set; } + public global::OpenAI.AuditLogOrganizationUpdated? Type267 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionRequestModel? Type268 { get; set; } + public global::OpenAI.AuditLogOrganizationUpdatedChangesRequested? Type268 { get; set; } /// /// /// - public global::OpenAI.AudioResponseFormat? Type269 { get; set; } + public global::OpenAI.AuditLogOrganizationUpdatedChangesRequestedSettings? Type269 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type270 { get; set; } + public global::OpenAI.AuditLogProjectCreated? Type270 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionRequestTimestampGranularitie? Type271 { get; set; } + public global::OpenAI.AuditLogProjectCreatedData? Type271 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionResponseJson? Type272 { get; set; } + public global::OpenAI.AuditLogProjectUpdated? Type272 { get; set; } /// /// /// - public global::OpenAI.TranscriptionSegment? Type273 { get; set; } + public global::OpenAI.AuditLogProjectUpdatedChangesRequested? Type273 { get; set; } /// /// /// - public float? Type274 { get; set; } + public global::OpenAI.AuditLogProjectArchived? Type274 { get; set; } /// /// /// - public global::OpenAI.TranscriptionWord? Type275 { get; set; } + public global::OpenAI.AuditLogServiceAccountCreated? Type275 { get; set; } /// /// /// - public global::OpenAI.CreateTranscriptionResponseVerboseJson? Type276 { get; set; } + public global::OpenAI.AuditLogServiceAccountCreatedData? 
Type276 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type277 { get; set; } + public global::OpenAI.AuditLogServiceAccountUpdated? Type277 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type278 { get; set; } + public global::OpenAI.AuditLogServiceAccountUpdatedChangesRequested? Type278 { get; set; } /// /// /// - public global::OpenAI.CreateTranslationRequest? Type279 { get; set; } + public global::OpenAI.AuditLogServiceAccountDeleted? Type279 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type280 { get; set; } + public global::OpenAI.AuditLogUserAdded? Type280 { get; set; } /// /// /// - public global::OpenAI.CreateTranslationRequestModel? Type281 { get; set; } + public global::OpenAI.AuditLogUserAddedData? Type281 { get; set; } /// /// /// - public global::OpenAI.CreateTranslationResponseJson? Type282 { get; set; } + public global::OpenAI.AuditLogUserUpdated? Type282 { get; set; } /// /// /// - public global::OpenAI.CreateTranslationResponseVerboseJson? Type283 { get; set; } + public global::OpenAI.AuditLogUserUpdatedChangesRequested? Type283 { get; set; } /// /// /// - public global::OpenAI.CreateSpeechRequest? Type284 { get; set; } + public global::OpenAI.AuditLogUserDeleted? Type284 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type285 { get; set; } + public global::OpenAI.AutoChunkingStrategyRequestParam? Type285 { get; set; } /// /// /// - public global::OpenAI.CreateSpeechRequestModel? Type286 { get; set; } + public global::OpenAI.AutoChunkingStrategyRequestParamType? Type286 { get; set; } /// /// /// - public global::OpenAI.CreateSpeechRequestVoice? Type287 { get; set; } + public global::OpenAI.Batch? Type287 { get; set; } /// /// /// - public global::OpenAI.CreateSpeechRequestResponseFormat? Type288 { get; set; } + public global::OpenAI.BatchObject? Type288 { get; set; } /// /// /// - public global::OpenAI.Upload? Type289 { get; set; } + public global::OpenAI.BatchErrors? 
Type289 { get; set; } /// /// /// - public global::OpenAI.UploadStatus? Type290 { get; set; } + public global::System.Collections.Generic.IList? Type290 { get; set; } /// /// /// - public global::OpenAI.UploadObject? Type291 { get; set; } + public global::OpenAI.BatchErrorsDataItem? Type291 { get; set; } /// /// /// - public global::OpenAI.UploadPart? Type292 { get; set; } + public global::OpenAI.BatchStatus? Type292 { get; set; } /// /// /// - public global::OpenAI.UploadPartObject? Type293 { get; set; } + public global::OpenAI.BatchRequestCounts? Type293 { get; set; } /// /// /// - public global::OpenAI.FinetuneChatRequestInput? Type294 { get; set; } + public global::OpenAI.BatchRequestInput? Type294 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type295 { get; set; } + public global::OpenAI.BatchRequestInputMethod? Type295 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type296 { get; set; } + public global::OpenAI.BatchRequestOutput? Type296 { get; set; } /// /// /// - public global::OpenAI.FinetuneCompletionRequestInput? Type297 { get; set; } + public global::OpenAI.BatchRequestOutputResponse? Type297 { get; set; } /// /// /// - public global::OpenAI.RunCompletionUsage? Type298 { get; set; } + public global::OpenAI.BatchRequestOutputError? Type298 { get; set; } /// /// /// - public global::OpenAI.RunStepCompletionUsage? Type299 { get; set; } + public global::OpenAI.CancelUploadRequest? Type299 { get; set; } /// /// /// - public global::OpenAI.AssistantsApiResponseFormatOption? Type300 { get; set; } + public global::OpenAI.ChatCompletionFunctionCallOption? Type300 { get; set; } /// /// /// - public global::OpenAI.AssistantsApiResponseFormatOptionEnum? Type301 { get; set; } + public global::OpenAI.ChatCompletionFunctions? Type301 { get; set; } /// /// /// - public global::OpenAI.AssistantObject? Type302 { get; set; } + public global::OpenAI.ChatCompletionMessageToolCall? 
Type302 { get; set; } /// /// /// - public global::OpenAI.AssistantObjectObject? Type303 { get; set; } + public global::OpenAI.ChatCompletionMessageToolCallType? Type303 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type304 { get; set; } + public global::OpenAI.ChatCompletionMessageToolCallFunction? Type304 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type305 { get; set; } + public global::OpenAI.ChatCompletionMessageToolCallChunk? Type305 { get; set; } /// /// /// - public global::OpenAI.AssistantToolsCode? Type306 { get; set; } + public global::OpenAI.ChatCompletionMessageToolCallChunkType? Type306 { get; set; } /// /// /// - public global::OpenAI.AssistantToolsCodeType? Type307 { get; set; } + public global::OpenAI.ChatCompletionMessageToolCallChunkFunction? Type307 { get; set; } /// /// /// - public global::OpenAI.AssistantToolsFileSearch? Type308 { get; set; } + public global::System.Collections.Generic.IList? Type308 { get; set; } /// /// /// - public global::OpenAI.AssistantToolsFileSearchType? Type309 { get; set; } + public global::System.Collections.Generic.IList? Type309 { get; set; } /// /// /// - public global::OpenAI.AssistantToolsFileSearchFileSearch? Type310 { get; set; } + public global::OpenAI.ChatCompletionModalitie? Type310 { get; set; } /// /// /// - public global::OpenAI.FileSearchRankingOptions? Type311 { get; set; } + public global::OpenAI.ChatCompletionNamedToolChoice? Type311 { get; set; } /// /// /// - public global::OpenAI.FileSearchRankingOptionsRanker? Type312 { get; set; } + public global::OpenAI.ChatCompletionNamedToolChoiceType? Type312 { get; set; } /// /// /// - public global::OpenAI.AssistantToolsFunction? Type313 { get; set; } + public global::OpenAI.ChatCompletionNamedToolChoiceFunction? Type313 { get; set; } /// /// /// - public global::OpenAI.AssistantToolsFunctionType? Type314 { get; set; } + public global::OpenAI.ChatCompletionRequestAssistantMessage? 
Type314 { get; set; } /// /// /// - public global::OpenAI.AssistantObjectToolResources? Type315 { get; set; } + public global::OpenAI.OneOf>? Type315 { get; set; } /// /// /// - public global::OpenAI.AssistantObjectToolResourcesCodeInterpreter? Type316 { get; set; } + public global::System.Collections.Generic.IList? Type316 { get; set; } /// /// /// - public global::OpenAI.AssistantObjectToolResourcesFileSearch? Type317 { get; set; } + public global::OpenAI.ChatCompletionRequestAssistantMessageContentPart? Type317 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequest? Type318 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartText? Type318 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type319 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartTextType? Type319 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestModel? Type320 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartRefusal? Type320 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResources? Type321 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartRefusalType? Type321 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesCodeInterpreter? Type322 { get; set; } + public global::OpenAI.ChatCompletionRequestAssistantMessageRole? Type322 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearch? Type323 { get; set; } + public global::OpenAI.ChatCompletionRequestAssistantMessageAudio? Type323 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type324 { get; set; } + public global::OpenAI.ChatCompletionRequestAssistantMessageFunctionCall? Type324 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStore? Type325 { get; set; } + public global::OpenAI.ChatCompletionRequestFunctionMessage? 
Type325 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type326 { get; set; } + public global::OpenAI.ChatCompletionRequestFunctionMessageRole? Type326 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1? Type327 { get; set; } + public global::OpenAI.ChatCompletionRequestMessage? Type327 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type? Type328 { get; set; } + public global::OpenAI.ChatCompletionRequestSystemMessage? Type328 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2? Type329 { get; set; } + public global::OpenAI.OneOf>? Type329 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type? Type330 { get; set; } + public global::System.Collections.Generic.IList? Type330 { get; set; } /// /// /// - public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static? Type331 { get; set; } + public global::OpenAI.ChatCompletionRequestSystemMessageContentPart? Type331 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequest? Type332 { get; set; } + public global::OpenAI.ChatCompletionRequestSystemMessageRole? Type332 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequestToolResources? Type333 { get; set; } + public global::OpenAI.ChatCompletionRequestUserMessage? Type333 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequestToolResourcesCodeInterpreter? Type334 { get; set; } + public global::OpenAI.OneOf>? Type334 { get; set; } /// /// /// - public global::OpenAI.ModifyAssistantRequestToolResourcesFileSearch? Type335 { get; set; } + public global::System.Collections.Generic.IList? 
Type335 { get; set; } /// /// /// - public global::OpenAI.DeleteAssistantResponse? Type336 { get; set; } + public global::OpenAI.ChatCompletionRequestUserMessageContentPart? Type336 { get; set; } /// /// /// - public global::OpenAI.DeleteAssistantResponseObject? Type337 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartImage? Type337 { get; set; } /// /// /// - public global::OpenAI.ListAssistantsResponse? Type338 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartImageType? Type338 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type339 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartImageImageUrl? Type339 { get; set; } /// /// /// - public global::OpenAI.AssistantToolsFileSearchTypeOnly? Type340 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartImageImageUrlDetail? Type340 { get; set; } /// /// /// - public global::OpenAI.AssistantToolsFileSearchTypeOnlyType? Type341 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartAudio? Type341 { get; set; } /// /// /// - public global::OpenAI.TruncationObject? Type342 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartAudioType? Type342 { get; set; } /// /// /// - public global::OpenAI.TruncationObjectType? Type343 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudio? Type343 { get; set; } /// /// /// - public global::OpenAI.AssistantsApiToolChoiceOption? Type344 { get; set; } + public global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudioFormat? Type344 { get; set; } /// /// /// - public global::OpenAI.AssistantsApiToolChoiceOptionEnum? Type345 { get; set; } + public global::OpenAI.ChatCompletionRequestUserMessageRole? Type345 { get; set; } /// /// /// - public global::OpenAI.AssistantsNamedToolChoice? Type346 { get; set; } + public global::OpenAI.ChatCompletionRequestToolMessage? 
Type346 { get; set; } /// /// /// - public global::OpenAI.AssistantsNamedToolChoiceType? Type347 { get; set; } + public global::OpenAI.ChatCompletionRequestToolMessageRole? Type347 { get; set; } /// /// /// - public global::OpenAI.AssistantsNamedToolChoiceFunction? Type348 { get; set; } + public global::OpenAI.OneOf>? Type348 { get; set; } /// /// /// - public global::OpenAI.RunObject? Type349 { get; set; } + public global::System.Collections.Generic.IList? Type349 { get; set; } /// /// /// - public global::OpenAI.RunObjectObject? Type350 { get; set; } + public global::OpenAI.ChatCompletionRequestToolMessageContentPart? Type350 { get; set; } /// /// /// - public global::OpenAI.RunObjectStatus? Type351 { get; set; } + public global::OpenAI.ChatCompletionResponseMessage? Type351 { get; set; } /// /// /// - public global::OpenAI.RunObjectRequiredAction? Type352 { get; set; } + public global::OpenAI.ChatCompletionResponseMessageRole? Type352 { get; set; } /// /// /// - public global::OpenAI.RunObjectRequiredActionType? Type353 { get; set; } + public global::OpenAI.ChatCompletionResponseMessageFunctionCall? Type353 { get; set; } /// /// /// - public global::OpenAI.RunObjectRequiredActionSubmitToolOutputs? Type354 { get; set; } + public global::OpenAI.ChatCompletionResponseMessageAudio? Type354 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type355 { get; set; } + public global::OpenAI.ChatCompletionRole? Type355 { get; set; } /// /// /// - public global::OpenAI.RunToolCallObject? Type356 { get; set; } + public global::OpenAI.ChatCompletionStreamOptions? Type356 { get; set; } /// /// /// - public global::OpenAI.RunToolCallObjectType? Type357 { get; set; } + public global::OpenAI.ChatCompletionStreamResponseDelta? Type357 { get; set; } /// /// /// - public global::OpenAI.RunToolCallObjectFunction? Type358 { get; set; } + public global::OpenAI.ChatCompletionStreamResponseDeltaFunctionCall? 
Type358 { get; set; } /// /// /// - public global::OpenAI.RunObjectLastError? Type359 { get; set; } + public global::System.Collections.Generic.IList? Type359 { get; set; } /// /// /// - public global::OpenAI.RunObjectLastErrorCode? Type360 { get; set; } + public global::OpenAI.ChatCompletionStreamResponseDeltaRole? Type360 { get; set; } /// /// /// - public global::OpenAI.RunObjectIncompleteDetails? Type361 { get; set; } + public global::OpenAI.ChatCompletionTokenLogprob? Type361 { get; set; } /// /// /// - public global::OpenAI.RunObjectIncompleteDetailsReason? Type362 { get; set; } + public global::System.Collections.Generic.IList? Type362 { get; set; } /// /// /// - public global::OpenAI.CreateRunRequest? Type363 { get; set; } + public global::System.Collections.Generic.IList? Type363 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type364 { get; set; } + public global::OpenAI.ChatCompletionTokenLogprobTopLogprob? Type364 { get; set; } /// /// /// - public global::OpenAI.CreateRunRequestModel? Type365 { get; set; } + public global::OpenAI.ChatCompletionTool? Type365 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type366 { get; set; } + public global::OpenAI.ChatCompletionToolType? Type366 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequest? Type367 { get; set; } + public global::OpenAI.ChatCompletionToolChoiceOption? Type367 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequestRole? Type368 { get; set; } + public global::OpenAI.ChatCompletionToolChoiceOptionEnum? Type368 { get; set; } /// /// /// - public global::OpenAI.OneOf>>? Type369 { get; set; } + public global::OpenAI.ChunkingStrategyRequestParam? Type369 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type370 { get; set; } + public global::OpenAI.StaticChunkingStrategyRequestParam? Type370 { get; set; } /// /// /// - public global::OpenAI.OneOf? 
Type371 { get; set; } + public global::OpenAI.StaticChunkingStrategyRequestParamType? Type371 { get; set; } /// /// /// - public global::OpenAI.MessageContentImageFileObject? Type372 { get; set; } + public global::OpenAI.StaticChunkingStrategy? Type372 { get; set; } /// /// /// - public global::OpenAI.MessageContentImageFileObjectType? Type373 { get; set; } + public global::OpenAI.CompleteUploadRequest? Type373 { get; set; } /// /// /// - public global::OpenAI.MessageContentImageFileObjectImageFile? Type374 { get; set; } + public global::OpenAI.CompletionUsage? Type374 { get; set; } /// /// /// - public global::OpenAI.MessageContentImageFileObjectImageFileDetail? Type375 { get; set; } + public global::OpenAI.CompletionUsageCompletionTokensDetails? Type375 { get; set; } /// /// /// - public global::OpenAI.MessageContentImageUrlObject? Type376 { get; set; } + public global::OpenAI.CompletionUsagePromptTokensDetails? Type376 { get; set; } /// /// /// - public global::OpenAI.MessageContentImageUrlObjectType? Type377 { get; set; } + public global::OpenAI.CreateAssistantRequest? Type377 { get; set; } /// /// /// - public global::OpenAI.MessageContentImageUrlObjectImageUrl? Type378 { get; set; } + public global::OpenAI.AnyOf? Type378 { get; set; } /// /// /// - public global::OpenAI.MessageContentImageUrlObjectImageUrlDetail? Type379 { get; set; } + public global::OpenAI.CreateAssistantRequestModel? Type379 { get; set; } /// /// /// - public global::OpenAI.MessageRequestContentTextObject? Type380 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResources? Type380 { get; set; } /// /// /// - public global::OpenAI.MessageRequestContentTextObjectType? Type381 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesCodeInterpreter? Type381 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type382 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearch? 
Type382 { get; set; } /// /// /// - public global::OpenAI.CreateMessageRequestAttachment? Type383 { get; set; } + public global::System.Collections.Generic.IList? Type383 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type384 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStore? Type384 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type385 { get; set; } + public global::OpenAI.OneOf? Type385 { get; set; } /// /// /// - public global::OpenAI.ListRunsResponse? Type386 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1? Type386 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type387 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type? Type387 { get; set; } /// /// /// - public global::OpenAI.ModifyRunRequest? Type388 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2? Type388 { get; set; } /// /// /// - public global::OpenAI.SubmitToolOutputsRunRequest? Type389 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type? Type389 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type390 { get; set; } + public global::OpenAI.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static? Type390 { get; set; } /// /// /// - public global::OpenAI.SubmitToolOutputsRunRequestToolOutput? Type391 { get; set; } + public global::OpenAI.CreateChatCompletionFunctionResponse? Type391 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequest? Type392 { get; set; } + public global::System.Collections.Generic.IList? Type392 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequest? 
Type393 { get; set; } + public global::OpenAI.CreateChatCompletionFunctionResponseChoice? Type393 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResources? Type394 { get; set; } + public global::OpenAI.CreateChatCompletionFunctionResponseChoiceFinishReason? Type394 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesCodeInterpreter? Type395 { get; set; } + public global::OpenAI.CreateChatCompletionFunctionResponseObject? Type395 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearch? Type396 { get; set; } + public global::OpenAI.CreateChatCompletionRequest? Type396 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type397 { get; set; } + public global::System.Collections.Generic.IList? Type397 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStore? Type398 { get; set; } + public global::OpenAI.AnyOf? Type398 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type399 { get; set; } + public global::OpenAI.CreateChatCompletionRequestModel? Type399 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1? Type400 { get; set; } + public global::System.Collections.Generic.Dictionary? Type400 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type? Type401 { get; set; } + public global::System.Collections.Generic.Dictionary? Type401 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2? Type402 { get; set; } + public global::OpenAI.CreateChatCompletionRequestAudio? Type402 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type? 
Type403 { get; set; } + public global::OpenAI.CreateChatCompletionRequestAudioVoice? Type403 { get; set; } /// /// /// - public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static? Type404 { get; set; } + public global::OpenAI.CreateChatCompletionRequestAudioFormat? Type404 { get; set; } /// /// /// - public global::OpenAI.AnyOf? Type405 { get; set; } + public global::OpenAI.OneOf? Type405 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestModel? Type406 { get; set; } + public global::OpenAI.CreateChatCompletionRequestServiceTier? Type406 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestToolResources? Type407 { get; set; } + public global::OpenAI.OneOf>? Type407 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestToolResourcesCodeInterpreter? Type408 { get; set; } + public global::System.Collections.Generic.IList? Type408 { get; set; } /// /// /// - public global::OpenAI.CreateThreadAndRunRequestToolResourcesFileSearch? Type409 { get; set; } + public global::OpenAI.OneOf? Type409 { get; set; } /// /// /// - public global::OpenAI.ThreadObject? Type410 { get; set; } + public global::OpenAI.CreateChatCompletionRequestFunctionCall? Type410 { get; set; } /// /// /// - public global::OpenAI.ThreadObjectObject? Type411 { get; set; } + public global::System.Collections.Generic.IList? Type411 { get; set; } /// /// /// - public global::OpenAI.ThreadObjectToolResources? Type412 { get; set; } + public global::OpenAI.CreateChatCompletionResponse? Type412 { get; set; } /// /// /// - public global::OpenAI.ThreadObjectToolResourcesCodeInterpreter? Type413 { get; set; } + public global::System.Collections.Generic.IList? Type413 { get; set; } /// /// /// - public global::OpenAI.ThreadObjectToolResourcesFileSearch? Type414 { get; set; } + public global::OpenAI.CreateChatCompletionResponseChoice? 
Type414 { get; set; } /// /// /// - public global::OpenAI.ModifyThreadRequest? Type415 { get; set; } + public global::OpenAI.CreateChatCompletionResponseChoiceFinishReason? Type415 { get; set; } /// /// /// - public global::OpenAI.ModifyThreadRequestToolResources? Type416 { get; set; } + public global::OpenAI.CreateChatCompletionResponseChoiceLogprobs? Type416 { get; set; } /// /// /// - public global::OpenAI.ModifyThreadRequestToolResourcesCodeInterpreter? Type417 { get; set; } + public global::System.Collections.Generic.IList? Type417 { get; set; } /// /// /// - public global::OpenAI.ModifyThreadRequestToolResourcesFileSearch? Type418 { get; set; } + public global::OpenAI.CreateChatCompletionResponseServiceTier? Type418 { get; set; } /// /// /// - public global::OpenAI.DeleteThreadResponse? Type419 { get; set; } + public global::OpenAI.CreateChatCompletionResponseObject? Type419 { get; set; } /// /// /// - public global::OpenAI.DeleteThreadResponseObject? Type420 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponse? Type420 { get; set; } /// /// /// - public global::OpenAI.ListThreadsResponse? Type421 { get; set; } + public global::System.Collections.Generic.IList? Type421 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type422 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseChoice? Type422 { get; set; } /// /// /// - public global::OpenAI.MessageObject? Type423 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseChoiceLogprobs? Type423 { get; set; } /// /// /// - public global::OpenAI.MessageObjectObject? Type424 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseChoiceFinishReason? Type424 { get; set; } /// /// /// - public global::OpenAI.MessageObjectStatus? Type425 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseServiceTier? Type425 { get; set; } /// /// /// - public global::OpenAI.MessageObjectIncompleteDetails? 
Type426 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseObject? Type426 { get; set; } /// /// /// - public global::OpenAI.MessageObjectIncompleteDetailsReason? Type427 { get; set; } + public global::OpenAI.CreateChatCompletionStreamResponseUsage? Type427 { get; set; } /// /// /// - public global::OpenAI.MessageObjectRole? Type428 { get; set; } + public global::OpenAI.CreateCompletionRequest? Type428 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type429 { get; set; } + public global::OpenAI.AnyOf? Type429 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type430 { get; set; } + public global::OpenAI.CreateCompletionRequestModel? Type430 { get; set; } /// /// /// - public global::OpenAI.MessageContentTextObject? Type431 { get; set; } + public global::OpenAI.OneOf, global::System.Collections.Generic.IList, global::System.Collections.Generic.IList>>? Type431 { get; set; } /// /// /// - public global::OpenAI.MessageContentTextObjectType? Type432 { get; set; } + public global::System.Collections.Generic.IList>? Type432 { get; set; } /// /// /// - public global::OpenAI.MessageContentTextObjectText? Type433 { get; set; } + public global::OpenAI.CreateCompletionResponse? Type433 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type434 { get; set; } + public global::System.Collections.Generic.IList? Type434 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type435 { get; set; } + public global::OpenAI.CreateCompletionResponseChoice? Type435 { get; set; } /// /// /// - public global::OpenAI.MessageContentTextAnnotationsFileCitationObject? Type436 { get; set; } + public global::OpenAI.CreateCompletionResponseChoiceFinishReason? Type436 { get; set; } /// /// /// - public global::OpenAI.MessageContentTextAnnotationsFileCitationObjectType? Type437 { get; set; } + public global::OpenAI.CreateCompletionResponseChoiceLogprobs? 
Type437 { get; set; } /// /// /// - public global::OpenAI.MessageContentTextAnnotationsFileCitationObjectFileCitation? Type438 { get; set; } + public global::System.Collections.Generic.IList? Type438 { get; set; } /// /// /// - public global::OpenAI.MessageContentTextAnnotationsFilePathObject? Type439 { get; set; } + public global::System.Collections.Generic.IList>? Type439 { get; set; } /// /// /// - public global::OpenAI.MessageContentTextAnnotationsFilePathObjectType? Type440 { get; set; } + public global::System.Collections.Generic.Dictionary? Type440 { get; set; } /// /// /// - public global::OpenAI.MessageContentTextAnnotationsFilePathObjectFilePath? Type441 { get; set; } + public global::OpenAI.CreateCompletionResponseObject? Type441 { get; set; } /// /// /// - public global::OpenAI.MessageContentRefusalObject? Type442 { get; set; } + public global::OpenAI.CreateEmbeddingRequest? Type442 { get; set; } /// /// /// - public global::OpenAI.MessageContentRefusalObjectType? Type443 { get; set; } + public global::OpenAI.AnyOf? Type443 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type444 { get; set; } + public global::OpenAI.CreateEmbeddingRequestModel? Type444 { get; set; } /// /// /// - public global::OpenAI.MessageObjectAttachment? Type445 { get; set; } + public global::OpenAI.CreateEmbeddingRequestEncodingFormat? Type445 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaObject? Type446 { get; set; } + public global::OpenAI.CreateEmbeddingResponse? Type446 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaObjectObject? Type447 { get; set; } + public global::System.Collections.Generic.IList? Type447 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaObjectDelta? Type448 { get; set; } + public global::OpenAI.Embedding? Type448 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaObjectDeltaRole? Type449 { get; set; } + public global::OpenAI.EmbeddingObject? 
Type449 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type450 { get; set; } + public global::OpenAI.CreateEmbeddingResponseObject? Type450 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type451 { get; set; } + public global::OpenAI.CreateEmbeddingResponseUsage? Type451 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentImageFileObject? Type452 { get; set; } + public global::OpenAI.CreateFileRequest? Type452 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentImageFileObjectType? Type453 { get; set; } + public global::OpenAI.CreateFileRequestPurpose? Type453 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentImageFileObjectImageFile? Type454 { get; set; } + public global::OpenAI.CreateFineTuningJobRequest? Type454 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentImageFileObjectImageFileDetail? Type455 { get; set; } + public global::OpenAI.AnyOf? Type455 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentTextObject? Type456 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestModel? Type456 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentTextObjectType? Type457 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestHyperparameters? Type457 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentTextObjectText? Type458 { get; set; } + public global::OpenAI.OneOf? Type458 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type459 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestHyperparametersBatchSize? Type459 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type460 { get; set; } + public global::OpenAI.OneOf? Type460 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentTextAnnotationsFileCitationObject? Type461 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestHyperparametersLearningRateMultiplier? 
Type461 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentTextAnnotationsFileCitationObjectType? Type462 { get; set; } + public global::OpenAI.OneOf? Type462 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentTextAnnotationsFileCitationObjectFileCitation? Type463 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestHyperparametersNEpochs? Type463 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentTextAnnotationsFilePathObject? Type464 { get; set; } + public global::System.Collections.Generic.IList? Type464 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentTextAnnotationsFilePathObjectType? Type465 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestIntegration? Type465 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentTextAnnotationsFilePathObjectFilePath? Type466 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestIntegrationType? Type466 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentRefusalObject? Type467 { get; set; } + public global::OpenAI.CreateFineTuningJobRequestIntegrationWandb? Type467 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentRefusalObjectType? Type468 { get; set; } + public global::OpenAI.CreateImageEditRequest? Type468 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentImageUrlObject? Type469 { get; set; } + public global::OpenAI.AnyOf? Type469 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentImageUrlObjectType? Type470 { get; set; } + public global::OpenAI.CreateImageEditRequestModel? Type470 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentImageUrlObjectImageUrl? Type471 { get; set; } + public global::OpenAI.CreateImageEditRequestSize? Type471 { get; set; } /// /// /// - public global::OpenAI.MessageDeltaContentImageUrlObjectImageUrlDetail? 
Type472 { get; set; } + public global::OpenAI.CreateImageEditRequestResponseFormat? Type472 { get; set; } /// /// /// - public global::OpenAI.ModifyMessageRequest? Type473 { get; set; } + public global::OpenAI.CreateImageRequest? Type473 { get; set; } /// /// /// - public global::OpenAI.DeleteMessageResponse? Type474 { get; set; } + public global::OpenAI.AnyOf? Type474 { get; set; } /// /// /// - public global::OpenAI.DeleteMessageResponseObject? Type475 { get; set; } + public global::OpenAI.CreateImageRequestModel? Type475 { get; set; } /// /// /// - public global::OpenAI.ListMessagesResponse? Type476 { get; set; } + public global::OpenAI.CreateImageRequestQuality? Type476 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type477 { get; set; } + public global::OpenAI.CreateImageRequestResponseFormat? Type477 { get; set; } /// /// /// - public global::OpenAI.RunStepObject? Type478 { get; set; } + public global::OpenAI.CreateImageRequestSize? Type478 { get; set; } /// /// /// - public global::OpenAI.RunStepObjectObject? Type479 { get; set; } + public global::OpenAI.CreateImageRequestStyle? Type479 { get; set; } /// /// /// - public global::OpenAI.RunStepObjectType? Type480 { get; set; } + public global::OpenAI.CreateImageVariationRequest? Type480 { get; set; } /// /// /// - public global::OpenAI.RunStepObjectStatus? Type481 { get; set; } + public global::OpenAI.AnyOf? Type481 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type482 { get; set; } + public global::OpenAI.CreateImageVariationRequestModel? Type482 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsMessageCreationObject? Type483 { get; set; } + public global::OpenAI.CreateImageVariationRequestResponseFormat? Type483 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsMessageCreationObjectType? Type484 { get; set; } + public global::OpenAI.CreateImageVariationRequestSize? 
Type484 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsMessageCreationObjectMessageCreation? Type485 { get; set; } + public global::OpenAI.CreateMessageRequest? Type485 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsObject? Type486 { get; set; } + public global::OpenAI.CreateMessageRequestRole? Type486 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsObjectType? Type487 { get; set; } + public global::OpenAI.OneOf>>? Type487 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type488 { get; set; } + public global::System.Collections.Generic.IList>? Type488 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type489 { get; set; } + public global::OpenAI.OneOf? Type489 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsCodeObject? Type490 { get; set; } + public global::OpenAI.MessageRequestContentTextObject? Type490 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsCodeObjectType? Type491 { get; set; } + public global::OpenAI.MessageRequestContentTextObjectType? Type491 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsCodeObjectCodeInterpreter? Type492 { get; set; } + public global::System.Collections.Generic.IList? Type492 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type493 { get; set; } + public global::OpenAI.CreateMessageRequestAttachment? Type493 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type494 { get; set; } + public global::OpenAI.CreateModerationRequest? Type494 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsCodeOutputLogsObject? Type495 { get; set; } + public global::OpenAI.OneOf, global::System.Collections.Generic.IList>>? Type495 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsCodeOutputLogsObjectType? Type496 { get; set; } + public global::System.Collections.Generic.IList>? 
Type496 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsCodeOutputImageObject? Type497 { get; set; } + public global::OpenAI.OneOf? Type497 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsCodeOutputImageObjectType? Type498 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1? Type498 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsCodeOutputImageObjectImage? Type499 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1Type? Type499 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsFileSearchObject? Type500 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant1ImageUrl? Type500 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsFileSearchObjectType? Type501 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant2? Type501 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsFileSearchObjectFileSearch? Type502 { get; set; } + public global::OpenAI.CreateModerationRequestInputVariant3ItemVariant2Type? Type502 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsFileSearchRankingOptionsObject? Type503 { get; set; } + public global::OpenAI.AnyOf? Type503 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker? Type504 { get; set; } + public global::OpenAI.CreateModerationRequestModel? Type504 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type505 { get; set; } + public global::OpenAI.CreateModerationResponse? Type505 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsFileSearchResultObject? Type506 { get; set; } + public global::System.Collections.Generic.IList? Type506 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? 
Type507 { get; set; } + public global::OpenAI.CreateModerationResponseResult? Type507 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsFileSearchResultObjectContentItem? Type508 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategories? Type508 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsFileSearchResultObjectContentItemType? Type509 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryScores? Type509 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsFunctionObject? Type510 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypes? Type510 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsFunctionObjectType? Type511 { get; set; } + public global::System.Collections.Generic.IList? Type511 { get; set; } /// /// /// - public global::OpenAI.RunStepDetailsToolCallsFunctionObjectFunction? Type512 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHateItem? Type512 { get; set; } /// /// /// - public global::OpenAI.RunStepObjectLastError? Type513 { get; set; } + public global::System.Collections.Generic.IList? Type513 { get; set; } /// /// /// - public global::OpenAI.RunStepObjectLastErrorCode? Type514 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem? Type514 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaObject? Type515 { get; set; } + public global::System.Collections.Generic.IList? Type515 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaObjectObject? Type516 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem? Type516 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaObjectDelta? Type517 { get; set; } + public global::System.Collections.Generic.IList? 
Type517 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type518 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem? Type518 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsMessageCreationObject? Type519 { get; set; } + public global::System.Collections.Generic.IList? Type519 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsMessageCreationObjectType? Type520 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem? Type520 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsMessageCreationObjectMessageCreation? Type521 { get; set; } + public global::System.Collections.Generic.IList? Type521 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsObject? Type522 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem? Type522 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsObjectType? Type523 { get; set; } + public global::System.Collections.Generic.IList? Type523 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type524 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem? Type524 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type525 { get; set; } + public global::System.Collections.Generic.IList? Type525 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeObject? Type526 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem? Type526 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeObjectType? Type527 { get; set; } + public global::System.Collections.Generic.IList? 
Type527 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? Type528 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction? Type528 { get; set; } /// /// /// - public global::System.Collections.Generic.IList>? Type529 { get; set; } + public global::System.Collections.Generic.IList? Type529 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type530 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem? Type530 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject? Type531 { get; set; } + public global::System.Collections.Generic.IList? Type531 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectType? Type532 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor? Type532 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeOutputImageObject? Type533 { get; set; } + public global::System.Collections.Generic.IList? Type533 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectType? Type534 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem? Type534 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImage? Type535 { get; set; } + public global::System.Collections.Generic.IList? Type535 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsFileSearchObject? Type536 { get; set; } + public global::OpenAI.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem? Type536 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsFileSearchObjectType? 
Type537 { get; set; } + public global::OpenAI.CreateRunRequest? Type537 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsFunctionObject? Type538 { get; set; } + public global::OpenAI.AnyOf? Type538 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsFunctionObjectType? Type539 { get; set; } + public global::OpenAI.CreateRunRequestModel? Type539 { get; set; } /// /// /// - public global::OpenAI.RunStepDeltaStepDetailsToolCallsFunctionObjectFunction? Type540 { get; set; } + public global::System.Collections.Generic.IList? Type540 { get; set; } /// /// /// - public global::OpenAI.ListRunStepsResponse? Type541 { get; set; } + public global::OpenAI.CreateSpeechRequest? Type541 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type542 { get; set; } + public global::OpenAI.AnyOf? Type542 { get; set; } /// /// /// - public global::OpenAI.VectorStoreExpirationAfter? Type543 { get; set; } + public global::OpenAI.CreateSpeechRequestModel? Type543 { get; set; } /// /// /// - public global::OpenAI.VectorStoreExpirationAfterAnchor? Type544 { get; set; } + public global::OpenAI.CreateSpeechRequestVoice? Type544 { get; set; } /// /// /// - public global::OpenAI.VectorStoreObject? Type545 { get; set; } + public global::OpenAI.CreateSpeechRequestResponseFormat? Type545 { get; set; } /// /// /// - public global::OpenAI.VectorStoreObjectObject? Type546 { get; set; } + public global::OpenAI.CreateThreadAndRunRequest? Type546 { get; set; } /// /// /// - public global::OpenAI.VectorStoreObjectFileCounts? Type547 { get; set; } + public global::OpenAI.CreateThreadRequest? Type547 { get; set; } /// /// /// - public global::OpenAI.VectorStoreObjectStatus? Type548 { get; set; } + public global::OpenAI.CreateThreadRequestToolResources? Type548 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreRequest? 
Type549 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesCodeInterpreter? Type549 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type550 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearch? Type550 { get; set; } /// /// /// - public global::OpenAI.AutoChunkingStrategyRequestParam? Type551 { get; set; } + public global::System.Collections.Generic.IList? Type551 { get; set; } /// /// /// - public global::OpenAI.AutoChunkingStrategyRequestParamType? Type552 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStore? Type552 { get; set; } /// /// /// - public global::OpenAI.StaticChunkingStrategyRequestParam? Type553 { get; set; } + public global::OpenAI.OneOf? Type553 { get; set; } /// /// /// - public global::OpenAI.StaticChunkingStrategyRequestParamType? Type554 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1? Type554 { get; set; } /// /// /// - public global::OpenAI.StaticChunkingStrategy? Type555 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type? Type555 { get; set; } /// /// /// - public global::OpenAI.UpdateVectorStoreRequest? Type556 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2? Type556 { get; set; } /// /// /// - public global::OpenAI.ListVectorStoresResponse? Type557 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type? Type557 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type558 { get; set; } + public global::OpenAI.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static? Type558 { get; set; } /// /// /// - public global::OpenAI.DeleteVectorStoreResponse? Type559 { get; set; } + public global::OpenAI.AnyOf? 
Type559 { get; set; } /// /// /// - public global::OpenAI.DeleteVectorStoreResponseObject? Type560 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestModel? Type560 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObject? Type561 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestToolResources? Type561 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectObject? Type562 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestToolResourcesCodeInterpreter? Type562 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectStatus? Type563 { get; set; } + public global::OpenAI.CreateThreadAndRunRequestToolResourcesFileSearch? Type563 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectLastError? Type564 { get; set; } + public global::OpenAI.CreateTranscriptionRequest? Type564 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileObjectLastErrorCode? Type565 { get; set; } + public global::OpenAI.AnyOf? Type565 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type566 { get; set; } + public global::OpenAI.CreateTranscriptionRequestModel? Type566 { get; set; } /// /// /// - public global::OpenAI.StaticChunkingStrategyResponseParam? Type567 { get; set; } + public global::System.Collections.Generic.IList? Type567 { get; set; } /// /// /// - public global::OpenAI.StaticChunkingStrategyResponseParamType? Type568 { get; set; } + public global::OpenAI.CreateTranscriptionRequestTimestampGranularitie? Type568 { get; set; } /// /// /// - public global::OpenAI.OtherChunkingStrategyResponseParam? Type569 { get; set; } + public global::OpenAI.CreateTranscriptionResponseJson? Type569 { get; set; } /// /// /// - public global::OpenAI.OtherChunkingStrategyResponseParamType? Type570 { get; set; } + public global::OpenAI.CreateTranscriptionResponseVerboseJson? Type570 { get; set; } /// /// /// - public global::OpenAI.ChunkingStrategyRequestParam? 
Type571 { get; set; } + public global::System.Collections.Generic.IList? Type571 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreFileRequest? Type572 { get; set; } + public global::OpenAI.TranscriptionWord? Type572 { get; set; } /// /// /// - public global::OpenAI.ListVectorStoreFilesResponse? Type573 { get; set; } + public float? Type573 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type574 { get; set; } + public global::System.Collections.Generic.IList? Type574 { get; set; } /// /// /// - public global::OpenAI.DeleteVectorStoreFileResponse? Type575 { get; set; } + public global::OpenAI.TranscriptionSegment? Type575 { get; set; } /// /// /// - public global::OpenAI.DeleteVectorStoreFileResponseObject? Type576 { get; set; } + public global::OpenAI.CreateTranslationRequest? Type576 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileBatchObject? Type577 { get; set; } + public global::OpenAI.AnyOf? Type577 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileBatchObjectObject? Type578 { get; set; } + public global::OpenAI.CreateTranslationRequestModel? Type578 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileBatchObjectStatus? Type579 { get; set; } + public global::OpenAI.CreateTranslationResponseJson? Type579 { get; set; } /// /// /// - public global::OpenAI.VectorStoreFileBatchObjectFileCounts? Type580 { get; set; } + public global::OpenAI.CreateTranslationResponseVerboseJson? Type580 { get; set; } /// /// /// - public global::OpenAI.CreateVectorStoreFileBatchRequest? Type581 { get; set; } + public global::OpenAI.CreateUploadRequest? Type581 { get; set; } /// /// /// - public global::OpenAI.AssistantStreamEvent? Type582 { get; set; } + public global::OpenAI.CreateUploadRequestPurpose? Type582 { get; set; } /// /// /// - public global::OpenAI.ThreadStreamEvent? Type583 { get; set; } + public global::OpenAI.CreateVectorStoreFileBatchRequest? 
Type583 { get; set; } /// /// /// - public global::OpenAI.ThreadStreamEventVariant1? Type584 { get; set; } + public global::OpenAI.CreateVectorStoreFileRequest? Type584 { get; set; } /// /// /// - public global::OpenAI.ThreadStreamEventVariant1Event? Type585 { get; set; } + public global::OpenAI.CreateVectorStoreRequest? Type585 { get; set; } /// /// /// - public global::OpenAI.RunStreamEvent? Type586 { get; set; } + public global::OpenAI.VectorStoreExpirationAfter? Type586 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant1? Type587 { get; set; } + public global::OpenAI.VectorStoreExpirationAfterAnchor? Type587 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant1Event? Type588 { get; set; } + public global::OpenAI.OneOf? Type588 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant2? Type589 { get; set; } + public global::OpenAI.DefaultProjectErrorResponse? Type589 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant2Event? Type590 { get; set; } + public global::OpenAI.DeleteAssistantResponse? Type590 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant3? Type591 { get; set; } + public global::OpenAI.DeleteAssistantResponseObject? Type591 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant3Event? Type592 { get; set; } + public global::OpenAI.DeleteFileResponse? Type592 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant4? Type593 { get; set; } + public global::OpenAI.DeleteFileResponseObject? Type593 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant4Event? Type594 { get; set; } + public global::OpenAI.DeleteMessageResponse? Type594 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant5? Type595 { get; set; } + public global::OpenAI.DeleteMessageResponseObject? Type595 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant5Event? 
Type596 { get; set; } + public global::OpenAI.DeleteModelResponse? Type596 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant6? Type597 { get; set; } + public global::OpenAI.DeleteThreadResponse? Type597 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant6Event? Type598 { get; set; } + public global::OpenAI.DeleteThreadResponseObject? Type598 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant7? Type599 { get; set; } + public global::OpenAI.DeleteVectorStoreFileResponse? Type599 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant7Event? Type600 { get; set; } + public global::OpenAI.DeleteVectorStoreFileResponseObject? Type600 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant8? Type601 { get; set; } + public global::OpenAI.DeleteVectorStoreResponse? Type601 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant8Event? Type602 { get; set; } + public global::OpenAI.DeleteVectorStoreResponseObject? Type602 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant9? Type603 { get; set; } + public global::OpenAI.ErrorResponse? Type603 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant9Event? Type604 { get; set; } + public global::OpenAI.FineTuneChatCompletionRequestAssistantMessage? Type604 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant10? Type605 { get; set; } + public global::OpenAI.FineTuneChatCompletionRequestAssistantMessageVariant1? Type605 { get; set; } /// /// /// - public global::OpenAI.RunStreamEventVariant10Event? Type606 { get; set; } + public global::OpenAI.FineTuningIntegration? Type606 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEvent? Type607 { get; set; } + public global::OpenAI.FineTuningIntegrationType? Type607 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEventVariant1? 
Type608 { get; set; } + public global::OpenAI.FineTuningIntegrationWandb? Type608 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEventVariant1Event? Type609 { get; set; } + public global::OpenAI.FineTuningJob? Type609 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEventVariant2? Type610 { get; set; } + public global::OpenAI.FineTuningJobError? Type610 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEventVariant2Event? Type611 { get; set; } + public global::OpenAI.FineTuningJobHyperparameters? Type611 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEventVariant3? Type612 { get; set; } + public global::OpenAI.OneOf? Type612 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEventVariant3Event? Type613 { get; set; } + public global::OpenAI.FineTuningJobHyperparametersNEpochs? Type613 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEventVariant4? Type614 { get; set; } + public global::OpenAI.FineTuningJobObject? Type614 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEventVariant4Event? Type615 { get; set; } + public global::OpenAI.FineTuningJobStatus? Type615 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEventVariant5? Type616 { get; set; } + public global::System.Collections.Generic.IList>? Type616 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEventVariant5Event? Type617 { get; set; } + public global::OpenAI.OneOf? Type617 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEventVariant6? Type618 { get; set; } + public global::OpenAI.FineTuningJobCheckpoint? Type618 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEventVariant6Event? Type619 { get; set; } + public global::OpenAI.FineTuningJobCheckpointMetrics? Type619 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEventVariant7? Type620 { get; set; } + public global::OpenAI.FineTuningJobCheckpointObject? 
Type620 { get; set; } /// /// /// - public global::OpenAI.RunStepStreamEventVariant7Event? Type621 { get; set; } + public global::OpenAI.FineTuningJobEvent? Type621 { get; set; } /// /// /// - public global::OpenAI.MessageStreamEvent? Type622 { get; set; } + public global::OpenAI.FineTuningJobEventLevel? Type622 { get; set; } /// /// /// - public global::OpenAI.MessageStreamEventVariant1? Type623 { get; set; } + public global::OpenAI.FineTuningJobEventObject? Type623 { get; set; } /// /// /// - public global::OpenAI.MessageStreamEventVariant1Event? Type624 { get; set; } + public global::OpenAI.FinetuneChatRequestInput? Type624 { get; set; } /// /// /// - public global::OpenAI.MessageStreamEventVariant2? Type625 { get; set; } + public global::System.Collections.Generic.IList>? Type625 { get; set; } /// /// /// - public global::OpenAI.MessageStreamEventVariant2Event? Type626 { get; set; } + public global::OpenAI.OneOf? Type626 { get; set; } /// /// /// - public global::OpenAI.MessageStreamEventVariant3? Type627 { get; set; } + public global::OpenAI.FinetuneCompletionRequestInput? Type627 { get; set; } /// /// /// - public global::OpenAI.MessageStreamEventVariant3Event? Type628 { get; set; } + public global::OpenAI.Image? Type628 { get; set; } /// /// /// - public global::OpenAI.MessageStreamEventVariant4? Type629 { get; set; } + public global::OpenAI.ImagesResponse? Type629 { get; set; } /// /// /// - public global::OpenAI.MessageStreamEventVariant4Event? Type630 { get; set; } + public global::System.Collections.Generic.IList? Type630 { get; set; } /// /// /// - public global::OpenAI.MessageStreamEventVariant5? Type631 { get; set; } + public global::OpenAI.Invite? Type631 { get; set; } /// /// /// - public global::OpenAI.MessageStreamEventVariant5Event? Type632 { get; set; } + public global::OpenAI.InviteObject? Type632 { get; set; } /// /// /// - public global::OpenAI.ErrorEvent? Type633 { get; set; } + public global::OpenAI.InviteRole? 
Type633 { get; set; } /// /// /// - public global::OpenAI.ErrorEventEvent? Type634 { get; set; } + public global::OpenAI.InviteStatus? Type634 { get; set; } /// /// /// - public global::OpenAI.DoneEvent? Type635 { get; set; } + public global::OpenAI.InviteDeleteResponse? Type635 { get; set; } /// /// /// - public global::OpenAI.DoneEventEvent? Type636 { get; set; } + public global::OpenAI.InviteDeleteResponseObject? Type636 { get; set; } /// /// /// - public global::OpenAI.DoneEventData? Type637 { get; set; } + public global::OpenAI.InviteListResponse? Type637 { get; set; } /// /// /// - public global::OpenAI.Batch? Type638 { get; set; } + public global::OpenAI.InviteListResponseObject? Type638 { get; set; } /// /// /// - public global::OpenAI.BatchObject? Type639 { get; set; } + public global::System.Collections.Generic.IList? Type639 { get; set; } /// /// /// - public global::OpenAI.BatchErrors? Type640 { get; set; } + public global::OpenAI.InviteRequest? Type640 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type641 { get; set; } + public global::OpenAI.InviteRequestRole? Type641 { get; set; } /// /// /// - public global::OpenAI.BatchErrorsDataItem? Type642 { get; set; } + public global::OpenAI.ListAssistantsResponse? Type642 { get; set; } /// /// /// - public global::OpenAI.BatchStatus? Type643 { get; set; } + public global::System.Collections.Generic.IList? Type643 { get; set; } /// /// /// - public global::OpenAI.BatchRequestCounts? Type644 { get; set; } + public global::OpenAI.ListAuditLogsResponse? Type644 { get; set; } /// /// /// - public global::OpenAI.BatchRequestInput? Type645 { get; set; } + public global::OpenAI.ListAuditLogsResponseObject? Type645 { get; set; } /// /// /// - public global::OpenAI.BatchRequestInputMethod? Type646 { get; set; } + public global::System.Collections.Generic.IList? Type646 { get; set; } /// /// /// - public global::OpenAI.BatchRequestOutput? 
Type647 { get; set; } + public global::OpenAI.ListBatchesResponse? Type647 { get; set; } /// /// /// - public global::OpenAI.BatchRequestOutputResponse? Type648 { get; set; } + public global::System.Collections.Generic.IList? Type648 { get; set; } /// /// /// - public global::OpenAI.BatchRequestOutputError? Type649 { get; set; } + public global::OpenAI.ListBatchesResponseObject? Type649 { get; set; } /// /// /// - public global::OpenAI.ListBatchesResponse? Type650 { get; set; } + public global::OpenAI.ListFilesResponse? Type650 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type651 { get; set; } + public global::System.Collections.Generic.IList? Type651 { get; set; } /// /// /// - public global::OpenAI.ListBatchesResponseObject? Type652 { get; set; } + public global::OpenAI.OpenAIFile? Type652 { get; set; } /// /// /// - public global::OpenAI.AuditLogActorServiceAccount? Type653 { get; set; } + public global::OpenAI.OpenAIFileObject? Type653 { get; set; } /// /// /// - public global::OpenAI.AuditLogActorUser? Type654 { get; set; } + public global::OpenAI.OpenAIFilePurpose? Type654 { get; set; } /// /// /// - public global::OpenAI.AuditLogActorApiKey? Type655 { get; set; } + public global::OpenAI.OpenAIFileStatus? Type655 { get; set; } /// /// /// - public global::OpenAI.AuditLogActorApiKeyType? Type656 { get; set; } + public global::OpenAI.ListFilesResponseObject? Type656 { get; set; } /// /// /// - public global::OpenAI.AuditLogActorSession? Type657 { get; set; } + public global::OpenAI.ListFineTuningJobCheckpointsResponse? Type657 { get; set; } /// /// /// - public global::OpenAI.AuditLogActor? Type658 { get; set; } + public global::System.Collections.Generic.IList? Type658 { get; set; } /// /// /// - public global::OpenAI.AuditLogActorType? Type659 { get; set; } + public global::OpenAI.ListFineTuningJobCheckpointsResponseObject? Type659 { get; set; } /// /// /// - public global::OpenAI.AuditLogEventType? 
Type660 { get; set; } + public global::OpenAI.ListFineTuningJobEventsResponse? Type660 { get; set; } /// /// /// - public global::OpenAI.AuditLog? Type661 { get; set; } + public global::System.Collections.Generic.IList? Type661 { get; set; } /// /// /// - public global::OpenAI.AuditLogProject? Type662 { get; set; } + public global::OpenAI.ListFineTuningJobEventsResponseObject? Type662 { get; set; } /// /// /// - public global::OpenAI.AuditLogApiKeyCreated? Type663 { get; set; } + public global::OpenAI.ListMessagesResponse? Type663 { get; set; } /// /// /// - public global::OpenAI.AuditLogApiKeyCreatedData? Type664 { get; set; } + public global::System.Collections.Generic.IList? Type664 { get; set; } /// /// /// - public global::OpenAI.AuditLogApiKeyUpdated? Type665 { get; set; } + public global::OpenAI.ListModelsResponse? Type665 { get; set; } /// /// /// - public global::OpenAI.AuditLogApiKeyUpdatedChangesRequested? Type666 { get; set; } + public global::OpenAI.ListModelsResponseObject? Type666 { get; set; } /// /// /// - public global::OpenAI.AuditLogApiKeyDeleted? Type667 { get; set; } + public global::System.Collections.Generic.IList? Type667 { get; set; } /// /// /// - public global::OpenAI.AuditLogInviteSent? Type668 { get; set; } + public global::OpenAI.Model15? Type668 { get; set; } /// /// /// - public global::OpenAI.AuditLogInviteSentData? Type669 { get; set; } + public global::OpenAI.ModelObject? Type669 { get; set; } /// /// /// - public global::OpenAI.AuditLogInviteAccepted? Type670 { get; set; } + public global::OpenAI.ListPaginatedFineTuningJobsResponse? Type670 { get; set; } /// /// /// - public global::OpenAI.AuditLogInviteDeleted? Type671 { get; set; } + public global::System.Collections.Generic.IList? Type671 { get; set; } /// /// /// - public global::OpenAI.AuditLogLoginFailed? Type672 { get; set; } + public global::OpenAI.ListPaginatedFineTuningJobsResponseObject? Type672 { get; set; } /// /// /// - public global::OpenAI.AuditLogLogoutFailed? 
Type673 { get; set; } + public global::OpenAI.ListRunStepsResponse? Type673 { get; set; } /// /// /// - public global::OpenAI.AuditLogOrganizationUpdated? Type674 { get; set; } + public global::System.Collections.Generic.IList? Type674 { get; set; } /// /// /// - public global::OpenAI.AuditLogOrganizationUpdatedChangesRequested? Type675 { get; set; } + public global::OpenAI.ListRunsResponse? Type675 { get; set; } /// /// /// - public global::OpenAI.AuditLogOrganizationUpdatedChangesRequestedSettings? Type676 { get; set; } + public global::System.Collections.Generic.IList? Type676 { get; set; } /// /// /// - public global::OpenAI.AuditLogProjectCreated? Type677 { get; set; } + public global::OpenAI.ListThreadsResponse? Type677 { get; set; } /// /// /// - public global::OpenAI.AuditLogProjectCreatedData? Type678 { get; set; } + public global::System.Collections.Generic.IList? Type678 { get; set; } /// /// /// - public global::OpenAI.AuditLogProjectUpdated? Type679 { get; set; } + public global::OpenAI.ListVectorStoreFilesResponse? Type679 { get; set; } /// /// /// - public global::OpenAI.AuditLogProjectUpdatedChangesRequested? Type680 { get; set; } + public global::System.Collections.Generic.IList? Type680 { get; set; } /// /// /// - public global::OpenAI.AuditLogProjectArchived? Type681 { get; set; } + public global::OpenAI.VectorStoreFileObject? Type681 { get; set; } /// /// /// - public global::OpenAI.AuditLogServiceAccountCreated? Type682 { get; set; } + public global::OpenAI.VectorStoreFileObjectObject? Type682 { get; set; } /// /// /// - public global::OpenAI.AuditLogServiceAccountCreatedData? Type683 { get; set; } + public global::OpenAI.VectorStoreFileObjectStatus? Type683 { get; set; } /// /// /// - public global::OpenAI.AuditLogServiceAccountUpdated? Type684 { get; set; } + public global::OpenAI.VectorStoreFileObjectLastError? Type684 { get; set; } /// /// /// - public global::OpenAI.AuditLogServiceAccountUpdatedChangesRequested? 
Type685 { get; set; } + public global::OpenAI.VectorStoreFileObjectLastErrorCode? Type685 { get; set; } /// /// /// - public global::OpenAI.AuditLogServiceAccountDeleted? Type686 { get; set; } + public global::OpenAI.OneOf? Type686 { get; set; } /// /// /// - public global::OpenAI.AuditLogUserAdded? Type687 { get; set; } + public global::OpenAI.StaticChunkingStrategyResponseParam? Type687 { get; set; } /// /// /// - public global::OpenAI.AuditLogUserAddedData? Type688 { get; set; } + public global::OpenAI.StaticChunkingStrategyResponseParamType? Type688 { get; set; } /// /// /// - public global::OpenAI.AuditLogUserUpdated? Type689 { get; set; } + public global::OpenAI.OtherChunkingStrategyResponseParam? Type689 { get; set; } /// /// /// - public global::OpenAI.AuditLogUserUpdatedChangesRequested? Type690 { get; set; } + public global::OpenAI.OtherChunkingStrategyResponseParamType? Type690 { get; set; } /// /// /// - public global::OpenAI.AuditLogUserDeleted? Type691 { get; set; } + public global::OpenAI.ListVectorStoresResponse? Type691 { get; set; } /// /// /// - public global::OpenAI.ListAuditLogsResponse? Type692 { get; set; } + public global::System.Collections.Generic.IList? Type692 { get; set; } /// /// /// - public global::OpenAI.ListAuditLogsResponseObject? Type693 { get; set; } + public global::OpenAI.VectorStoreObject? Type693 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type694 { get; set; } + public global::OpenAI.VectorStoreObjectObject? Type694 { get; set; } /// /// /// - public global::OpenAI.Invite? Type695 { get; set; } + public global::OpenAI.VectorStoreObjectFileCounts? Type695 { get; set; } /// /// /// - public global::OpenAI.InviteObject? Type696 { get; set; } + public global::OpenAI.VectorStoreObjectStatus? Type696 { get; set; } /// /// /// - public global::OpenAI.InviteRole? Type697 { get; set; } + public global::OpenAI.ModifyAssistantRequest? 
Type697 { get; set; } /// /// /// - public global::OpenAI.InviteStatus? Type698 { get; set; } + public global::OpenAI.ModifyAssistantRequestToolResources? Type698 { get; set; } /// /// /// - public global::OpenAI.InviteListResponse? Type699 { get; set; } + public global::OpenAI.ModifyAssistantRequestToolResourcesCodeInterpreter? Type699 { get; set; } /// /// /// - public global::OpenAI.InviteListResponseObject? Type700 { get; set; } + public global::OpenAI.ModifyAssistantRequestToolResourcesFileSearch? Type700 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type701 { get; set; } + public global::OpenAI.ModifyMessageRequest? Type701 { get; set; } /// /// /// - public global::OpenAI.InviteRequest? Type702 { get; set; } + public global::OpenAI.ModifyRunRequest? Type702 { get; set; } /// /// /// - public global::OpenAI.InviteRequestRole? Type703 { get; set; } + public global::OpenAI.ModifyThreadRequest? Type703 { get; set; } /// /// /// - public global::OpenAI.InviteDeleteResponse? Type704 { get; set; } + public global::OpenAI.ModifyThreadRequestToolResources? Type704 { get; set; } /// /// /// - public global::OpenAI.InviteDeleteResponseObject? Type705 { get; set; } + public global::OpenAI.ModifyThreadRequestToolResourcesCodeInterpreter? Type705 { get; set; } /// /// /// - public global::OpenAI.User? Type706 { get; set; } + public global::OpenAI.ModifyThreadRequestToolResourcesFileSearch? Type706 { get; set; } /// /// /// - public global::OpenAI.UserObject? Type707 { get; set; } + public global::OpenAI.Project? Type707 { get; set; } /// /// /// - public global::OpenAI.UserRole? Type708 { get; set; } + public global::OpenAI.ProjectObject? Type708 { get; set; } /// /// /// - public global::OpenAI.UserListResponse? Type709 { get; set; } + public global::OpenAI.ProjectStatus? Type709 { get; set; } /// /// /// - public global::OpenAI.UserListResponseObject? Type710 { get; set; } + public global::OpenAI.ProjectApiKey? 
Type710 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type711 { get; set; } + public global::OpenAI.ProjectApiKeyObject? Type711 { get; set; } /// /// /// - public global::OpenAI.UserRoleUpdateRequest? Type712 { get; set; } + public global::OpenAI.ProjectApiKeyOwner? Type712 { get; set; } /// /// /// - public global::OpenAI.UserRoleUpdateRequestRole? Type713 { get; set; } + public global::OpenAI.ProjectApiKeyOwnerType? Type713 { get; set; } /// /// /// - public global::OpenAI.UserDeleteResponse? Type714 { get; set; } + public global::OpenAI.ProjectUser? Type714 { get; set; } /// /// /// - public global::OpenAI.UserDeleteResponseObject? Type715 { get; set; } + public global::OpenAI.ProjectUserObject? Type715 { get; set; } /// /// /// - public global::OpenAI.Project? Type716 { get; set; } + public global::OpenAI.ProjectUserRole? Type716 { get; set; } /// /// /// - public global::OpenAI.ProjectObject? Type717 { get; set; } + public global::OpenAI.ProjectServiceAccount? Type717 { get; set; } /// /// /// - public global::OpenAI.ProjectStatus? Type718 { get; set; } + public global::OpenAI.ProjectServiceAccountObject? Type718 { get; set; } /// /// /// - public global::OpenAI.ProjectListResponse? Type719 { get; set; } + public global::OpenAI.ProjectServiceAccountRole? Type719 { get; set; } /// /// /// - public global::OpenAI.ProjectListResponseObject? Type720 { get; set; } + public global::OpenAI.ProjectApiKeyDeleteResponse? Type720 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type721 { get; set; } + public global::OpenAI.ProjectApiKeyDeleteResponseObject? Type721 { get; set; } /// /// /// - public global::OpenAI.ProjectCreateRequest? Type722 { get; set; } + public global::OpenAI.ProjectApiKeyListResponse? Type722 { get; set; } /// /// /// - public global::OpenAI.ProjectUpdateRequest? Type723 { get; set; } + public global::OpenAI.ProjectApiKeyListResponseObject? 
Type723 { get; set; } /// /// /// - public global::OpenAI.DefaultProjectErrorResponse? Type724 { get; set; } + public global::System.Collections.Generic.IList? Type724 { get; set; } /// /// /// - public global::OpenAI.ProjectUser? Type725 { get; set; } + public global::OpenAI.ProjectCreateRequest? Type725 { get; set; } /// /// /// - public global::OpenAI.ProjectUserObject? Type726 { get; set; } + public global::OpenAI.ProjectListResponse? Type726 { get; set; } /// /// /// - public global::OpenAI.ProjectUserRole? Type727 { get; set; } + public global::OpenAI.ProjectListResponseObject? Type727 { get; set; } /// /// /// - public global::OpenAI.ProjectUserListResponse? Type728 { get; set; } + public global::System.Collections.Generic.IList? Type728 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type729 { get; set; } + public global::OpenAI.ProjectServiceAccountApiKey? Type729 { get; set; } /// /// /// - public global::OpenAI.ProjectUserCreateRequest? Type730 { get; set; } + public global::OpenAI.ProjectServiceAccountApiKeyObject? Type730 { get; set; } /// /// /// - public global::OpenAI.ProjectUserCreateRequestRole? Type731 { get; set; } + public global::OpenAI.ProjectServiceAccountCreateRequest? Type731 { get; set; } /// /// /// - public global::OpenAI.ProjectUserUpdateRequest? Type732 { get; set; } + public global::OpenAI.ProjectServiceAccountCreateResponse? Type732 { get; set; } /// /// /// - public global::OpenAI.ProjectUserUpdateRequestRole? Type733 { get; set; } + public global::OpenAI.ProjectServiceAccountCreateResponseObject? Type733 { get; set; } /// /// /// - public global::OpenAI.ProjectUserDeleteResponse? Type734 { get; set; } + public global::OpenAI.ProjectServiceAccountCreateResponseRole? Type734 { get; set; } /// /// /// - public global::OpenAI.ProjectUserDeleteResponseObject? Type735 { get; set; } + public global::OpenAI.ProjectServiceAccountDeleteResponse? 
Type735 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccount? Type736 { get; set; } + public global::OpenAI.ProjectServiceAccountDeleteResponseObject? Type736 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountObject? Type737 { get; set; } + public global::OpenAI.ProjectServiceAccountListResponse? Type737 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountRole? Type738 { get; set; } + public global::OpenAI.ProjectServiceAccountListResponseObject? Type738 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountListResponse? Type739 { get; set; } + public global::System.Collections.Generic.IList? Type739 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountListResponseObject? Type740 { get; set; } + public global::OpenAI.ProjectUpdateRequest? Type740 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type741 { get; set; } + public global::OpenAI.ProjectUserCreateRequest? Type741 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountCreateRequest? Type742 { get; set; } + public global::OpenAI.ProjectUserCreateRequestRole? Type742 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountCreateResponse? Type743 { get; set; } + public global::OpenAI.ProjectUserDeleteResponse? Type743 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountCreateResponseObject? Type744 { get; set; } + public global::OpenAI.ProjectUserDeleteResponseObject? Type744 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountCreateResponseRole? Type745 { get; set; } + public global::OpenAI.ProjectUserListResponse? Type745 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountApiKey? Type746 { get; set; } + public global::System.Collections.Generic.IList? Type746 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountApiKeyObject? 
Type747 { get; set; } + public global::OpenAI.ProjectUserUpdateRequest? Type747 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountDeleteResponse? Type748 { get; set; } + public global::OpenAI.ProjectUserUpdateRequestRole? Type748 { get; set; } /// /// /// - public global::OpenAI.ProjectServiceAccountDeleteResponseObject? Type749 { get; set; } + public global::OpenAI.RealtimeClientEventConversationItemCreate? Type749 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKey? Type750 { get; set; } + public global::OpenAI.RealtimeClientEventConversationItemCreateItem? Type750 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyObject? Type751 { get; set; } + public global::System.Collections.Generic.IList? Type751 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyOwner? Type752 { get; set; } + public global::OpenAI.RealtimeClientEventConversationItemCreateItemContentItem? Type752 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyOwnerType? Type753 { get; set; } + public global::OpenAI.RealtimeClientEventConversationItemDelete? Type753 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyListResponse? Type754 { get; set; } + public global::OpenAI.RealtimeClientEventConversationItemTruncate? Type754 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyListResponseObject? Type755 { get; set; } + public global::OpenAI.RealtimeClientEventInputAudioBufferAppend? Type755 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type756 { get; set; } + public global::OpenAI.RealtimeClientEventInputAudioBufferClear? Type756 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyDeleteResponse? Type757 { get; set; } + public global::OpenAI.RealtimeClientEventInputAudioBufferCommit? Type757 { get; set; } /// /// /// - public global::OpenAI.ProjectApiKeyDeleteResponseObject? Type758 { get; set; } + public global::OpenAI.RealtimeClientEventResponseCancel? 
Type758 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventType? Type759 { get; set; } + public global::OpenAI.RealtimeClientEventResponseCreate? Type759 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEventBase? Type760 { get; set; } + public global::OpenAI.RealtimeClientEventResponseCreateResponse? Type760 { get; set; } /// /// /// - public global::OpenAI.RealtimeAudioFormat? Type761 { get; set; } + public global::System.Collections.Generic.IList? Type761 { get; set; } /// /// /// - public global::OpenAI.RealtimeSession? Type762 { get; set; } + public global::OpenAI.RealtimeClientEventResponseCreateResponseTool? Type762 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionObject? Type763 { get; set; } + public global::OpenAI.OneOf? Type763 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionVoice? Type764 { get; set; } + public global::OpenAI.RealtimeClientEventResponseCreateResponseMaxOutputTokens? Type764 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionInputAudioTranscription? Type765 { get; set; } + public global::OpenAI.RealtimeClientEventSessionUpdate? Type765 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionTurnDetection? Type766 { get; set; } + public global::OpenAI.RealtimeClientEventSessionUpdateSession? Type766 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionTurnDetectionType? Type767 { get; set; } + public global::OpenAI.RealtimeClientEventSessionUpdateSessionInputAudioTranscription? Type767 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type768 { get; set; } + public global::OpenAI.RealtimeClientEventSessionUpdateSessionTurnDetection? Type768 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionTool? Type769 { get; set; } + public global::System.Collections.Generic.IList? Type769 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionToolChoice? 
Type770 { get; set; } + public global::OpenAI.RealtimeClientEventSessionUpdateSessionTool? Type770 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type771 { get; set; } + public global::OpenAI.OneOf? Type771 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionMaxOutputTokens? Type772 { get; set; } + public global::OpenAI.RealtimeClientEventSessionUpdateSessionMaxOutputTokens? Type772 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversation? Type773 { get; set; } + public global::OpenAI.RealtimeServerEventConversationCreated? Type773 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationObject? Type774 { get; set; } + public global::OpenAI.RealtimeServerEventConversationCreatedConversation? Type774 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItem? Type775 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemCreated? Type775 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemObject? Type776 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemCreatedItem? Type776 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemType? Type777 { get; set; } + public global::System.Collections.Generic.IList? Type777 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemStatus? Type778 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemCreatedItemContentItem? Type778 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemRole? Type779 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemDeleted? Type779 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type780 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted? Type780 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemContentItem? 
Type781 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailed? Type781 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemContentItemType? Type782 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError? Type782 { get; set; } /// /// /// - public global::OpenAI.RealtimeContentPart? Type783 { get; set; } + public global::OpenAI.RealtimeServerEventConversationItemTruncated? Type783 { get; set; } /// /// /// - public global::OpenAI.RealtimeContentPartType? Type784 { get; set; } + public global::OpenAI.RealtimeServerEventError? Type784 { get; set; } /// /// /// - public global::OpenAI.RealtimeErrorDetails? Type785 { get; set; } + public global::OpenAI.RealtimeServerEventErrorError? Type785 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponse? Type786 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferCleared? Type786 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseObject? Type787 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferCommitted? Type787 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseStatus? Type788 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStarted? Type788 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type789 { get; set; } + public global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStopped? Type789 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseUsage? Type790 { get; set; } + public global::OpenAI.RealtimeServerEventRateLimitsUpdated? Type790 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionUpdate? Type791 { get; set; } + public global::System.Collections.Generic.IList? Type791 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionUpdateType? 
Type792 { get; set; } + public global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimit? Type792 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferAppend? Type793 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioDelta? Type793 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferAppendType? Type794 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioDone? Type794 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCommit? Type795 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioTranscriptDelta? Type795 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCommitType? Type796 { get; set; } + public global::OpenAI.RealtimeServerEventResponseAudioTranscriptDone? Type796 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferClear? Type797 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartAdded? Type797 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferClearType? Type798 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartAddedPart? Type798 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemCreate? Type799 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartDone? Type799 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemCreateType? Type800 { get; set; } + public global::OpenAI.RealtimeServerEventResponseContentPartDonePart? Type800 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemTruncate? Type801 { get; set; } + public global::OpenAI.RealtimeServerEventResponseCreated? Type801 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemTruncateType? Type802 { get; set; } + public global::OpenAI.RealtimeServerEventResponseCreatedResponse? 
Type802 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemDelete? Type803 { get; set; } + public global::System.Collections.Generic.IList? Type803 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemDeleteType? Type804 { get; set; } + public global::OpenAI.RealtimeServerEventResponseDone? Type804 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreate? Type805 { get; set; } + public global::OpenAI.RealtimeServerEventResponseDoneResponse? Type805 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateType? Type806 { get; set; } + public global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDelta? Type806 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponse? Type807 { get; set; } + public global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDone? Type807 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type808 { get; set; } + public global::OpenAI.RealtimeServerEventResponseOutputItemAdded? Type808 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseModalitie? Type809 { get; set; } + public global::OpenAI.RealtimeServerEventResponseOutputItemAddedItem? Type809 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseVoice? Type810 { get; set; } + public global::System.Collections.Generic.IList? Type810 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type811 { get; set; } + public global::OpenAI.RealtimeServerEventResponseOutputItemAddedItemContentItem? Type811 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseTool? Type812 { get; set; } + public global::OpenAI.RealtimeServerEventResponseOutputItemDone? Type812 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type813 { get; set; } + public global::OpenAI.RealtimeServerEventResponseOutputItemDoneItem? 
Type813 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseToolChoice? Type814 { get; set; } + public global::System.Collections.Generic.IList? Type814 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type815 { get; set; } + public global::OpenAI.RealtimeServerEventResponseOutputItemDoneItemContentItem? Type815 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreateResponseMaxOutputTokens? Type816 { get; set; } + public global::OpenAI.RealtimeServerEventResponseTextDelta? Type816 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCancel? Type817 { get; set; } + public global::OpenAI.RealtimeServerEventResponseTextDone? Type817 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCancelType? Type818 { get; set; } + public global::OpenAI.RealtimeServerEventSessionCreated? Type818 { get; set; } /// /// /// - public global::OpenAI.RealtimeError? Type819 { get; set; } + public global::OpenAI.RealtimeServerEventSessionCreatedSession? Type819 { get; set; } /// /// /// - public global::OpenAI.RealtimeErrorType? Type820 { get; set; } + public global::OpenAI.RealtimeServerEventSessionCreatedSessionInputAudioTranscription? Type820 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionCreated? Type821 { get; set; } + public global::OpenAI.RealtimeServerEventSessionCreatedSessionTurnDetection? Type821 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionCreatedType? Type822 { get; set; } + public global::System.Collections.Generic.IList? Type822 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionUpdated? Type823 { get; set; } + public global::OpenAI.RealtimeServerEventSessionCreatedSessionTool? Type823 { get; set; } /// /// /// - public global::OpenAI.RealtimeSessionUpdatedType? Type824 { get; set; } + public global::OpenAI.OneOf? Type824 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationCreated? 
Type825 { get; set; } + public global::OpenAI.RealtimeServerEventSessionCreatedSessionMaxOutputTokens? Type825 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationCreatedType? Type826 { get; set; } + public global::OpenAI.RealtimeServerEventSessionUpdated? Type826 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemCreated? Type827 { get; set; } + public global::OpenAI.RealtimeServerEventSessionUpdatedSession? Type827 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemCreatedType? Type828 { get; set; } + public global::OpenAI.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription? Type828 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionCompleted? Type829 { get; set; } + public global::OpenAI.RealtimeServerEventSessionUpdatedSessionTurnDetection? Type829 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionCompletedType? Type830 { get; set; } + public global::System.Collections.Generic.IList? Type830 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionFailed? Type831 { get; set; } + public global::OpenAI.RealtimeServerEventSessionUpdatedSessionTool? Type831 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionFailedType? Type832 { get; set; } + public global::OpenAI.OneOf? Type832 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemTruncated? Type833 { get; set; } + public global::OpenAI.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens? Type833 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemTruncatedType? Type834 { get; set; } + public global::OpenAI.SubmitToolOutputsRunRequest? Type834 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemDeleted? Type835 { get; set; } + public global::System.Collections.Generic.IList? 
Type835 { get; set; } /// /// /// - public global::OpenAI.RealtimeConversationItemDeletedType? Type836 { get; set; } + public global::OpenAI.SubmitToolOutputsRunRequestToolOutput? Type836 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCommitted? Type837 { get; set; } + public global::OpenAI.UpdateVectorStoreRequest? Type837 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCommittedType? Type838 { get; set; } + public global::OpenAI.Upload? Type838 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferCleared? Type839 { get; set; } + public global::OpenAI.UploadStatus? Type839 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferClearedType? Type840 { get; set; } + public global::OpenAI.UploadObject? Type840 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferSpeechStarted? Type841 { get; set; } + public global::OpenAI.UploadPart? Type841 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferSpeechStartedType? Type842 { get; set; } + public global::OpenAI.UploadPartObject? Type842 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferSpeechStopped? Type843 { get; set; } + public global::OpenAI.User? Type843 { get; set; } /// /// /// - public global::OpenAI.RealtimeInputAudioBufferSpeechStoppedType? Type844 { get; set; } + public global::OpenAI.UserObject? Type844 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreated? Type845 { get; set; } + public global::OpenAI.UserRole? Type845 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseCreatedType? Type846 { get; set; } + public global::OpenAI.UserDeleteResponse? Type846 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseDone? Type847 { get; set; } + public global::OpenAI.UserDeleteResponseObject? Type847 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseDoneType? 
Type848 { get; set; } + public global::OpenAI.UserListResponse? Type848 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseOutputItemAdded? Type849 { get; set; } + public global::OpenAI.UserListResponseObject? Type849 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseOutputItemAddedType? Type850 { get; set; } + public global::System.Collections.Generic.IList? Type850 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseOutputItemDone? Type851 { get; set; } + public global::OpenAI.UserRoleUpdateRequest? Type851 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseOutputItemDoneType? Type852 { get; set; } + public global::OpenAI.UserRoleUpdateRequestRole? Type852 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseContentPartAdded? Type853 { get; set; } + public global::OpenAI.VectorStoreFileBatchObject? Type853 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseContentPartAddedType? Type854 { get; set; } + public global::OpenAI.VectorStoreFileBatchObjectObject? Type854 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseContentPartDone? Type855 { get; set; } + public global::OpenAI.VectorStoreFileBatchObjectStatus? Type855 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseContentPartDoneType? Type856 { get; set; } + public global::OpenAI.VectorStoreFileBatchObjectFileCounts? Type856 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseTextDelta? Type857 { get; set; } + public global::OpenAI.RealtimeServerEventType? Type857 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseTextDeltaType? Type858 { get; set; } + public global::OpenAI.RealtimeServerEventBase? Type858 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseTextDone? Type859 { get; set; } + public global::OpenAI.RealtimeAudioFormat? Type859 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseTextDoneType? 
Type860 { get; set; } + public global::OpenAI.RealtimeSession? Type860 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioTranscriptDelta? Type861 { get; set; } + public global::OpenAI.RealtimeSessionObject? Type861 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioTranscriptDeltaType? Type862 { get; set; } + public global::OpenAI.RealtimeSessionVoice? Type862 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioTranscriptDone? Type863 { get; set; } + public global::OpenAI.RealtimeSessionInputAudioTranscription? Type863 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioTranscriptDoneType? Type864 { get; set; } + public global::OpenAI.RealtimeSessionTurnDetection? Type864 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioDelta? Type865 { get; set; } + public global::OpenAI.RealtimeSessionTurnDetectionType? Type865 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioDeltaType? Type866 { get; set; } + public global::System.Collections.Generic.IList? Type866 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioDone? Type867 { get; set; } + public global::OpenAI.RealtimeSessionTool? Type867 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseAudioDoneType? Type868 { get; set; } + public global::OpenAI.RealtimeSessionToolChoice? Type868 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseFunctionCallArgumentsDelta? Type869 { get; set; } + public global::OpenAI.OneOf? Type869 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseFunctionCallArgumentsDeltaType? Type870 { get; set; } + public global::OpenAI.RealtimeSessionMaxOutputTokens? Type870 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseFunctionCallArgumentsDone? Type871 { get; set; } + public global::OpenAI.RealtimeConversation? 
Type871 { get; set; } /// /// /// - public global::OpenAI.RealtimeResponseFunctionCallArgumentsDoneType? Type872 { get; set; } + public global::OpenAI.RealtimeConversationObject? Type872 { get; set; } /// /// /// - public global::OpenAI.RealtimeRateLimitsUpdated? Type873 { get; set; } + public global::OpenAI.RealtimeConversationItem? Type873 { get; set; } /// /// /// - public global::OpenAI.RealtimeRateLimitsUpdatedType? Type874 { get; set; } + public global::OpenAI.RealtimeConversationItemObject? Type874 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type875 { get; set; } + public global::OpenAI.RealtimeConversationItemType? Type875 { get; set; } /// /// /// - public global::OpenAI.RealtimeRateLimitsUpdatedRateLimit? Type876 { get; set; } + public global::OpenAI.RealtimeConversationItemStatus? Type876 { get; set; } /// /// /// - public global::OpenAI.RealtimeRateLimitsUpdatedRateLimitName? Type877 { get; set; } + public global::OpenAI.RealtimeConversationItemRole? Type877 { get; set; } /// /// /// - public global::OpenAI.RealtimeServerEvent? Type878 { get; set; } + public global::System.Collections.Generic.IList? Type878 { get; set; } /// /// /// - public global::OpenAI.CreateBatchRequest? Type879 { get; set; } + public global::OpenAI.RealtimeConversationItemContentItem? Type879 { get; set; } /// /// /// - public global::OpenAI.CreateBatchRequestEndpoint? Type880 { get; set; } + public global::OpenAI.RealtimeConversationItemContentItemType? Type880 { get; set; } /// /// /// - public global::OpenAI.CreateBatchRequestCompletionWindow? Type881 { get; set; } + public global::OpenAI.RealtimeContentPart? Type881 { get; set; } /// /// /// - public global::System.Collections.Generic.Dictionary? Type882 { get; set; } + public global::OpenAI.RealtimeContentPartType? Type882 { get; set; } /// /// /// - public global::OpenAI.ListAssistantsOrder? Type883 { get; set; } + public global::OpenAI.RealtimeErrorDetails? 
Type883 { get; set; } /// /// /// - public global::OpenAI.ListMessagesOrder? Type884 { get; set; } + public global::OpenAI.RealtimeResponse? Type884 { get; set; } /// /// /// - public global::OpenAI.ListRunsOrder? Type885 { get; set; } + public global::OpenAI.RealtimeResponseObject? Type885 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type886 { get; set; } + public global::OpenAI.RealtimeResponseStatus? Type886 { get; set; } /// /// /// - public global::OpenAI.CreateRunIncludeItem? Type887 { get; set; } + public global::System.Collections.Generic.IList? Type887 { get; set; } /// /// /// - public global::OpenAI.ListRunStepsOrder? Type888 { get; set; } + public global::OpenAI.RealtimeResponseUsage? Type888 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type889 { get; set; } + public global::OpenAI.RealtimeSessionUpdate? Type889 { get; set; } /// /// /// - public global::OpenAI.ListRunStepsIncludeItem? Type890 { get; set; } + public global::OpenAI.RealtimeSessionUpdateType? Type890 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type891 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferAppend? Type891 { get; set; } /// /// /// - public global::OpenAI.GetRunStepIncludeItem? Type892 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferAppendType? Type892 { get; set; } /// /// /// - public global::OpenAI.ListVectorStoresOrder? Type893 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferCommit? Type893 { get; set; } /// /// /// - public global::OpenAI.ListVectorStoreFilesOrder? Type894 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferCommitType? Type894 { get; set; } /// /// /// - public global::OpenAI.ListVectorStoreFilesFilter? Type895 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferClear? Type895 { get; set; } /// /// /// - public global::OpenAI.ListFilesInVectorStoreBatchOrder? 
Type896 { get; set; } + public global::OpenAI.RealtimeInputAudioBufferClearType? Type896 { get; set; } /// /// /// - public global::OpenAI.ListFilesInVectorStoreBatchFilter? Type897 { get; set; } + public global::OpenAI.RealtimeConversationItemCreate? Type897 { get; set; } /// /// /// - public global::OpenAI.ListAuditLogsEffectiveAt? Type898 { get; set; } + public global::OpenAI.RealtimeConversationItemCreateType? Type898 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type899 { get; set; } + public global::OpenAI.RealtimeConversationItemTruncate? Type899 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type900 { get; set; } + public global::OpenAI.RealtimeConversationItemTruncateType? Type900 { get; set; } /// /// /// - public global::OpenAI.OneOf? Type901 { get; set; } + public global::OpenAI.RealtimeConversationItemDelete? Type901 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeConversationItemDeleteType? Type902 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseCreate? Type903 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseCreateType? Type904 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseCreateResponse? Type905 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type906 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseCreateResponseModalitie? Type907 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseCreateResponseVoice? Type908 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type909 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseCreateResponseTool? Type910 { get; set; } + /// + /// + /// + public global::OpenAI.OneOf? Type911 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseCreateResponseToolChoice? Type912 { get; set; } + /// + /// + /// + public global::OpenAI.OneOf? 
Type913 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseCreateResponseMaxOutputTokens? Type914 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseCancel? Type915 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseCancelType? Type916 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeError? Type917 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeErrorType? Type918 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeSessionCreated? Type919 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeSessionCreatedType? Type920 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeSessionUpdated? Type921 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeSessionUpdatedType? Type922 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeConversationCreated? Type923 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeConversationCreatedType? Type924 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeConversationItemCreated? Type925 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeConversationItemCreatedType? Type926 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionCompleted? Type927 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionCompletedType? Type928 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionFailed? Type929 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeConversationItemInputAudioTranscriptionFailedType? Type930 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeConversationItemTruncated? Type931 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeConversationItemTruncatedType? Type932 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeConversationItemDeleted? 
Type933 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeConversationItemDeletedType? Type934 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeInputAudioBufferCommitted? Type935 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeInputAudioBufferCommittedType? Type936 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeInputAudioBufferCleared? Type937 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeInputAudioBufferClearedType? Type938 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeInputAudioBufferSpeechStarted? Type939 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeInputAudioBufferSpeechStartedType? Type940 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeInputAudioBufferSpeechStopped? Type941 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeInputAudioBufferSpeechStoppedType? Type942 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseCreated? Type943 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseCreatedType? Type944 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseDone? Type945 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseDoneType? Type946 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseOutputItemAdded? Type947 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseOutputItemAddedType? Type948 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseOutputItemDone? Type949 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseOutputItemDoneType? Type950 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseContentPartAdded? Type951 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseContentPartAddedType? Type952 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseContentPartDone? 
Type953 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseContentPartDoneType? Type954 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseTextDelta? Type955 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseTextDeltaType? Type956 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseTextDone? Type957 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseTextDoneType? Type958 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseAudioTranscriptDelta? Type959 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseAudioTranscriptDeltaType? Type960 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseAudioTranscriptDone? Type961 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseAudioTranscriptDoneType? Type962 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseAudioDelta? Type963 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseAudioDeltaType? Type964 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseAudioDone? Type965 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseAudioDoneType? Type966 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseFunctionCallArgumentsDelta? Type967 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseFunctionCallArgumentsDeltaType? Type968 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseFunctionCallArgumentsDone? Type969 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeResponseFunctionCallArgumentsDoneType? Type970 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeRateLimitsUpdated? Type971 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeRateLimitsUpdatedType? Type972 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? 
Type973 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeRateLimitsUpdatedRateLimit? Type974 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeRateLimitsUpdatedRateLimitName? Type975 { get; set; } + /// + /// + /// + public global::OpenAI.RealtimeServerEvent? Type976 { get; set; } + /// + /// + /// + public global::OpenAI.CreateBatchRequest? Type977 { get; set; } + /// + /// + /// + public global::OpenAI.CreateBatchRequestEndpoint? Type978 { get; set; } + /// + /// + /// + public global::OpenAI.CreateBatchRequestCompletionWindow? Type979 { get; set; } + /// + /// + /// + public global::OpenAI.ListAssistantsOrder? Type980 { get; set; } + /// + /// + /// + public global::OpenAI.ListAuditLogsEffectiveAt? Type981 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type982 { get; set; } + /// + /// + /// + public global::OpenAI.ListMessagesOrder? Type983 { get; set; } + /// + /// + /// + public global::OpenAI.ListRunsOrder? Type984 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type985 { get; set; } + /// + /// + /// + public global::OpenAI.CreateRunIncludeItem? Type986 { get; set; } + /// + /// + /// + public global::OpenAI.ListRunStepsOrder? Type987 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type988 { get; set; } + /// + /// + /// + public global::OpenAI.ListRunStepsIncludeItem? Type989 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type990 { get; set; } + /// + /// + /// + public global::OpenAI.GetRunStepIncludeItem? Type991 { get; set; } + /// + /// + /// + public global::OpenAI.ListVectorStoresOrder? Type992 { get; set; } + /// + /// + /// + public global::OpenAI.ListFilesInVectorStoreBatchOrder? Type993 { get; set; } + /// + /// + /// + public global::OpenAI.ListFilesInVectorStoreBatchFilter? Type994 { get; set; } + /// + /// + /// + public global::OpenAI.ListVectorStoreFilesOrder? 
Type995 { get; set; } + /// + /// + /// + public global::OpenAI.ListVectorStoreFilesFilter? Type996 { get; set; } + /// + /// + /// + public global::OpenAI.OneOf? Type997 { get; set; } + /// + /// + /// + public global::OpenAI.OneOf? Type998 { get; set; } } } \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateAssistant.g.cs b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateAssistant.g.cs index 49af29f7..53f2e7f4 100644 --- a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateAssistant.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateAssistant.g.cs @@ -140,12 +140,13 @@ partial void ProcessCreateAssistantResponseContent( /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// diff --git a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateRun.g.cs b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateRun.g.cs index 7942d638..1fd6fde4 100644 --- a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateRun.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateRun.g.cs @@ -154,12 +154,13 @@ partial void ProcessCreateRunResponseContent( /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// diff --git a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateThreadAndRun.g.cs b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateThreadAndRun.g.cs index b9ab0727..b8307999 100644 --- a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateThreadAndRun.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.CreateThreadAndRun.g.cs @@ -138,12 +138,13 @@ partial void ProcessCreateThreadAndRunResponseContent( /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// diff --git a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.ModifyAssistant.g.cs b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.ModifyAssistant.g.cs index 790decc0..66f6158b 100644 --- a/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.ModifyAssistant.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.AssistantsClient.ModifyAssistant.g.cs @@ -146,12 +146,13 @@ partial void ProcessModifyAssistantResponseContent( /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// diff --git a/src/libs/OpenAI/Generated/OpenAI.ChatClient.CreateChatCompletion.g.cs b/src/libs/OpenAI/Generated/OpenAI.ChatClient.CreateChatCompletion.g.cs index 92942479..36525ea7 100644 --- a/src/libs/OpenAI/Generated/OpenAI.ChatClient.CreateChatCompletion.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.ChatClient.CreateChatCompletion.g.cs @@ -22,7 +22,9 @@ partial void ProcessCreateChatCompletionResponseContent( ref string content); /// - /// Creates a model response for the given chat conversation. + /// Creates a model response for the given chat conversation. Learn more in the
+ /// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
+ /// and [audio](/docs/guides/audio) guides. ///
/// /// The token to cancel the operation with @@ -115,15 +117,29 @@ partial void ProcessCreateChatCompletionResponseContent( } /// - /// Creates a model response for the given chat conversation. + /// Creates a model response for the given chat conversation. Learn more in the
+ /// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
+ /// and [audio](/docs/guides/audio) guides. ///
/// - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. Depending on the
+ /// [model](/docs/models) you use, different message types (modalities) are
+ /// supported, like [text](/docs/guides/text-generation),
+ /// [images](/docs/guides/vision), and [audio](/docs/guides/audio). /// /// /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
/// Example: gpt-4o /// + /// + /// Whether or not to store the output of this chat completion request
+ /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Default Value: false + /// + /// + /// Developer-defined tags and values used for filtering completions
+ /// in the [dashboard](https://platform.openai.com/chat-completions). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
/// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
@@ -148,6 +164,19 @@ partial void ProcessCreateChatCompletionResponseContent( /// Default Value: 1
/// Example: 1 /// + /// + /// Output types that you would like the model to generate for this request.
+ /// Most models are capable of generating text, which is the default:
+ /// `["text"]`
+ /// The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To
+ /// request that this model generate both text and audio responses, you can
+ /// use:
+ /// `["text", "audio"]` + /// + /// + /// Parameters for audio output. Required when audio output is requested with
+ /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
/// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
@@ -170,7 +199,8 @@ partial void ProcessCreateChatCompletionResponseContent( /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - When not set, the default behavior is 'auto'.
- /// When this parameter is set, the response body will include the `service_tier` utilized. + /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// Default Value: auto /// /// /// Up to 4 sequences where the API will stop generating further tokens. @@ -217,16 +247,20 @@ partial void ProcessCreateChatCompletionResponseContent( public async global::System.Threading.Tasks.Task CreateChatCompletionAsync( global::System.Collections.Generic.IList messages, global::OpenAI.AnyOf model, + bool? store = false, + global::System.Collections.Generic.Dictionary? metadata = default, double? frequencyPenalty = 0, global::System.Collections.Generic.Dictionary? logitBias = default, bool? logprobs = false, int? topLogprobs = default, int? maxCompletionTokens = default, int? n = 1, + global::System.Collections.Generic.IList? modalities = default, + global::OpenAI.CreateChatCompletionRequestAudio? audio = default, double? presencePenalty = 0, global::OpenAI.OneOf? responseFormat = default, int? seed = default, - global::OpenAI.CreateChatCompletionRequestServiceTier? serviceTier = default, + global::OpenAI.CreateChatCompletionRequestServiceTier? serviceTier = global::OpenAI.CreateChatCompletionRequestServiceTier.Auto, global::OpenAI.OneOf>? stop = default, bool? stream = false, global::OpenAI.ChatCompletionStreamOptions? 
streamOptions = default, @@ -242,12 +276,16 @@ partial void ProcessCreateChatCompletionResponseContent( { Messages = messages, Model = model, + Store = store, + Metadata = metadata, FrequencyPenalty = frequencyPenalty, LogitBias = logitBias, Logprobs = logprobs, TopLogprobs = topLogprobs, MaxCompletionTokens = maxCompletionTokens, N = n, + Modalities = modalities, + Audio = audio, PresencePenalty = presencePenalty, ResponseFormat = responseFormat, Seed = seed, diff --git a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateAssistant.g.cs b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateAssistant.g.cs index 75d300c3..cf4806d0 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateAssistant.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateAssistant.g.cs @@ -40,12 +40,13 @@ public partial interface IAssistantsClient /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// diff --git a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateRun.g.cs b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateRun.g.cs index 7b6244f9..c32e0d2e 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateRun.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateRun.g.cs @@ -46,12 +46,13 @@ public partial interface IAssistantsClient /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// diff --git a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateThreadAndRun.g.cs b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateThreadAndRun.g.cs index d4dccbc8..5465c942 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateThreadAndRun.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.CreateThreadAndRun.g.cs @@ -38,12 +38,13 @@ public partial interface IAssistantsClient /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// diff --git a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.ModifyAssistant.g.cs b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.ModifyAssistant.g.cs index 4f2a447c..9ecc3792 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.ModifyAssistant.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IAssistantsClient.ModifyAssistant.g.cs @@ -42,12 +42,13 @@ public partial interface IAssistantsClient /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// diff --git a/src/libs/OpenAI/Generated/OpenAI.IChatClient.CreateChatCompletion.g.cs b/src/libs/OpenAI/Generated/OpenAI.IChatClient.CreateChatCompletion.g.cs index a0953a95..0ef60b9a 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IChatClient.CreateChatCompletion.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IChatClient.CreateChatCompletion.g.cs @@ -5,7 +5,9 @@ namespace OpenAI public partial interface IChatClient { /// - /// Creates a model response for the given chat conversation. + /// Creates a model response for the given chat conversation. Learn more in the
+ /// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
+ /// and [audio](/docs/guides/audio) guides. ///
/// /// The token to cancel the operation with @@ -15,15 +17,29 @@ public partial interface IChatClient global::System.Threading.CancellationToken cancellationToken = default); /// - /// Creates a model response for the given chat conversation. + /// Creates a model response for the given chat conversation. Learn more in the
+ /// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
+ /// and [audio](/docs/guides/audio) guides. ///
/// - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. Depending on the
+ /// [model](/docs/models) you use, different message types (modalities) are
+ /// supported, like [text](/docs/guides/text-generation),
+ /// [images](/docs/guides/vision), and [audio](/docs/guides/audio). /// /// /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
/// Example: gpt-4o /// + /// + /// Whether or not to store the output of this chat completion request
+ /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Default Value: false + /// + /// + /// Developer-defined tags and values used for filtering completions
+ /// in the [dashboard](https://platform.openai.com/chat-completions). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
/// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
@@ -48,6 +64,19 @@ public partial interface IChatClient /// Default Value: 1
/// Example: 1 /// + /// + /// Output types that you would like the model to generate for this request.
+ /// Most models are capable of generating text, which is the default:
+ /// `["text"]`
+ /// The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To
+ /// request that this model generate both text and audio responses, you can
+ /// use:
+ /// `["text", "audio"]` + /// + /// + /// Parameters for audio output. Required when audio output is requested with
+ /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
/// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
@@ -70,7 +99,8 @@ public partial interface IChatClient /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - When not set, the default behavior is 'auto'.
- /// When this parameter is set, the response body will include the `service_tier` utilized. + /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// Default Value: auto /// /// /// Up to 4 sequences where the API will stop generating further tokens. @@ -117,16 +147,20 @@ public partial interface IChatClient global::System.Threading.Tasks.Task CreateChatCompletionAsync( global::System.Collections.Generic.IList messages, global::OpenAI.AnyOf model, + bool? store = false, + global::System.Collections.Generic.Dictionary? metadata = default, double? frequencyPenalty = 0, global::System.Collections.Generic.Dictionary? logitBias = default, bool? logprobs = false, int? topLogprobs = default, int? maxCompletionTokens = default, int? n = 1, + global::System.Collections.Generic.IList? modalities = default, + global::OpenAI.CreateChatCompletionRequestAudio? audio = default, double? presencePenalty = 0, global::OpenAI.OneOf? responseFormat = default, int? seed = default, - global::OpenAI.CreateChatCompletionRequestServiceTier? serviceTier = default, + global::OpenAI.CreateChatCompletionRequestServiceTier? serviceTier = global::OpenAI.CreateChatCompletionRequestServiceTier.Auto, global::OpenAI.OneOf>? stop = default, bool? stream = false, global::OpenAI.ChatCompletionStreamOptions? 
streamOptions = default, diff --git a/src/libs/OpenAI/Generated/OpenAI.IModelsClient.RetrieveModel.g.cs b/src/libs/OpenAI/Generated/OpenAI.IModelsClient.RetrieveModel.g.cs index b4219d8b..53249fb0 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IModelsClient.RetrieveModel.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IModelsClient.RetrieveModel.g.cs @@ -12,7 +12,7 @@ public partial interface IModelsClient /// /// The token to cancel the operation with /// - global::System.Threading.Tasks.Task RetrieveModelAsync( + global::System.Threading.Tasks.Task RetrieveModelAsync( string model, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/libs/OpenAI/Generated/OpenAI.IOpenAiApi.g.cs b/src/libs/OpenAI/Generated/OpenAI.IOpenAiApi.g.cs index df0485f6..832bc9ca 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IOpenAiApi.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IOpenAiApi.g.cs @@ -84,12 +84,12 @@ public partial interface IOpenAiApi : global::System.IDisposable /// /// /// - public VectorStoresClient VectorStores { get; } + public InvitesClient Invites { get; } /// /// /// - public InvitesClient Invites { get; } + public ProjectsClient Projects { get; } /// /// @@ -99,7 +99,7 @@ public partial interface IOpenAiApi : global::System.IDisposable /// /// /// - public ProjectsClient Projects { get; } + public VectorStoresClient VectorStores { get; } } } \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.IProjectsClient.ModifyProject.g.cs b/src/libs/OpenAI/Generated/OpenAI.IProjectsClient.ModifyProject.g.cs index 1be7f4c2..19f73a94 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IProjectsClient.ModifyProject.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IProjectsClient.ModifyProject.g.cs @@ -7,22 +7,26 @@ public partial interface IProjectsClient /// /// Modifies a project in the organization. 
/// + /// /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyProjectAsync( + string projectId, global::OpenAI.ProjectUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default); /// /// Modifies a project in the organization. /// + /// /// /// The updated name of the project, this name appears in reports. /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyProjectAsync( + string projectId, string name, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/libs/OpenAI/Generated/OpenAI.IProjectsClient.ModifyProjectUser.g.cs b/src/libs/OpenAI/Generated/OpenAI.IProjectsClient.ModifyProjectUser.g.cs index 7ad131a5..fbd83cb3 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IProjectsClient.ModifyProjectUser.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IProjectsClient.ModifyProjectUser.g.cs @@ -7,22 +7,30 @@ public partial interface IProjectsClient /// /// Modifies a user's role in the project. /// + /// + /// /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyProjectUserAsync( + string projectId, + string userId, global::OpenAI.ProjectUserUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default); /// /// Modifies a user's role in the project. 
/// + /// + /// /// /// `owner` or `member` /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyProjectUserAsync( + string projectId, + string userId, global::OpenAI.ProjectUserUpdateRequestRole role, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/libs/OpenAI/Generated/OpenAI.IUsersClient.ModifyUser.g.cs b/src/libs/OpenAI/Generated/OpenAI.IUsersClient.ModifyUser.g.cs index ebab95b1..a2c738be 100644 --- a/src/libs/OpenAI/Generated/OpenAI.IUsersClient.ModifyUser.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.IUsersClient.ModifyUser.g.cs @@ -7,22 +7,26 @@ public partial interface IUsersClient /// /// Modifies a user's role in the organization. /// + /// /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyUserAsync( + string userId, global::OpenAI.UserRoleUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default); /// /// Modifies a user's role in the organization. /// + /// /// /// `owner` or `reader` /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyUserAsync( + string userId, global::OpenAI.UserRoleUpdateRequestRole role, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionModalitie.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionModalitie.g.cs new file mode 100644 index 00000000..bad8abae --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionModalitie.g.cs @@ -0,0 +1,51 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum ChatCompletionModalitie + { + /// + /// + /// + Text, + /// + /// + /// + Audio, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ChatCompletionModalitieExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this ChatCompletionModalitie value) + { + return value switch + { + ChatCompletionModalitie.Text => "text", + ChatCompletionModalitie.Audio => "audio", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ChatCompletionModalitie? ToEnum(string value) + { + return value switch + { + "text" => ChatCompletionModalitie.Text, + "audio" => ChatCompletionModalitie.Audio, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessage.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessage.g.cs index fcc19e5b..379ef1e5 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessage.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessage.g.cs @@ -36,6 +36,13 @@ public sealed partial class ChatCompletionRequestAssistantMessage [global::System.Text.Json.Serialization.JsonPropertyName("name")] public string? Name { get; set; } + /// + /// Data about a previous audio response from the model.
+ /// [Learn more](/docs/guides/audio). + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public global::OpenAI.ChatCompletionRequestAssistantMessageAudio? Audio { get; set; } + /// /// The tool calls generated by the model, such as function calls. /// diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessageAudio.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessageAudio.g.cs new file mode 100644 index 00000000..118201fd --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessageAudio.g.cs @@ -0,0 +1,83 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Data about a previous audio response from the model.
+ /// [Learn more](/docs/guides/audio). + ///
+ public sealed partial class ChatCompletionRequestAssistantMessageAudio + { + /// + /// Unique identifier for a previous audio response from the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Id { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. 
+ /// + public static global::OpenAI.ChatCompletionRequestAssistantMessageAudio? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.ChatCompletionRequestAssistantMessageAudio), + jsonSerializerContext) as global::OpenAI.ChatCompletionRequestAssistantMessageAudio; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.ChatCompletionRequestAssistantMessageAudio? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessageContentPart.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessageContentPart.g.cs index 544a0297..cdcfe691 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessageContentPart.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestAssistantMessageContentPart.g.cs @@ -11,7 +11,7 @@ namespace OpenAI public readonly partial struct ChatCompletionRequestAssistantMessageContentPart : global::System.IEquatable { /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). /// #if NET6_0_OR_GREATER public global::OpenAI.ChatCompletionRequestMessageContentPartText? Text { get; init; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartAudio.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartAudio.g.cs new file mode 100644 index 00000000..3ee4efc2 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartAudio.g.cs @@ -0,0 +1,89 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Learn about [audio inputs](/docs/guides/audio). + /// + public sealed partial class ChatCompletionRequestMessageContentPartAudio + { + /// + /// The type of the content part. Always `input_audio`. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartAudioTypeJsonConverter))] + public global::OpenAI.ChatCompletionRequestMessageContentPartAudioType Type { get; set; } + + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_audio")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudio InputAudio { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.ChatCompletionRequestMessageContentPartAudio? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.ChatCompletionRequestMessageContentPartAudio), + jsonSerializerContext) as global::OpenAI.ChatCompletionRequestMessageContentPartAudio; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.ChatCompletionRequestMessageContentPartAudio? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.g.cs new file mode 100644 index 00000000..dc0342e8 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.g.cs @@ -0,0 +1,90 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public sealed partial class ChatCompletionRequestMessageContentPartAudioInputAudio + { + /// + /// Base64 encoded audio data. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("data")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Data { get; set; } + + /// + /// The format of the encoded audio data. Currently supports "wav" and "mp3". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("format")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormatJsonConverter))] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudioFormat Format { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudio? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudio), + jsonSerializerContext) as global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudio; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudio? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.cs new file mode 100644 index 00000000..9f9c60ed --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.cs @@ -0,0 +1,51 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The format of the encoded audio data. Currently supports "wav" and "mp3". + /// + public enum ChatCompletionRequestMessageContentPartAudioInputAudioFormat + { + /// + /// + /// + Wav, + /// + /// + /// + Mp3, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ChatCompletionRequestMessageContentPartAudioInputAudioFormatExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this ChatCompletionRequestMessageContentPartAudioInputAudioFormat value) + { + return value switch + { + ChatCompletionRequestMessageContentPartAudioInputAudioFormat.Wav => "wav", + ChatCompletionRequestMessageContentPartAudioInputAudioFormat.Mp3 => "mp3", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ChatCompletionRequestMessageContentPartAudioInputAudioFormat? ToEnum(string value) + { + return value switch + { + "wav" => ChatCompletionRequestMessageContentPartAudioInputAudioFormat.Wav, + "mp3" => ChatCompletionRequestMessageContentPartAudioInputAudioFormat.Mp3, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartAudioType.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartAudioType.g.cs new file mode 100644 index 00000000..50d014ac --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartAudioType.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The type of the content part. Always `input_audio`. + /// + public enum ChatCompletionRequestMessageContentPartAudioType + { + /// + /// + /// + InputAudio, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ChatCompletionRequestMessageContentPartAudioTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ChatCompletionRequestMessageContentPartAudioType value) + { + return value switch + { + ChatCompletionRequestMessageContentPartAudioType.InputAudio => "input_audio", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. 
+ /// + public static ChatCompletionRequestMessageContentPartAudioType? ToEnum(string value) + { + return value switch + { + "input_audio" => ChatCompletionRequestMessageContentPartAudioType.InputAudio, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartImage.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartImage.g.cs index 18b66fb2..9f598913 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartImage.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartImage.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// + /// Learn about [image inputs](/docs/guides/vision). /// public sealed partial class ChatCompletionRequestMessageContentPartImage { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartText.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartText.g.cs index 14c7b98d..0ba30265 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartText.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestMessageContentPartText.g.cs @@ -4,7 +4,7 @@ namespace OpenAI { /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). 
/// public sealed partial class ChatCompletionRequestMessageContentPartText { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestSystemMessageContentPart.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestSystemMessageContentPart.g.cs index 4cc581da..4b8da906 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestSystemMessageContentPart.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestSystemMessageContentPart.g.cs @@ -11,7 +11,7 @@ namespace OpenAI public readonly partial struct ChatCompletionRequestSystemMessageContentPart : global::System.IEquatable { /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). /// #if NET6_0_OR_GREATER public global::OpenAI.ChatCompletionRequestMessageContentPartText? Text { get; init; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestToolMessageContentPart.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestToolMessageContentPart.g.cs index 761e95d3..82ea4914 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestToolMessageContentPart.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestToolMessageContentPart.g.cs @@ -11,7 +11,7 @@ namespace OpenAI public readonly partial struct ChatCompletionRequestToolMessageContentPart : global::System.IEquatable { /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). /// #if NET6_0_OR_GREATER public global::OpenAI.ChatCompletionRequestMessageContentPartText? 
Text { get; init; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestUserMessageContentPart.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestUserMessageContentPart.g.cs index 0a836ea5..aeae39f6 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestUserMessageContentPart.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionRequestUserMessageContentPart.g.cs @@ -11,7 +11,7 @@ namespace OpenAI public readonly partial struct ChatCompletionRequestUserMessageContentPart : global::System.IEquatable { /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). /// #if NET6_0_OR_GREATER public global::OpenAI.ChatCompletionRequestMessageContentPartText? Text { get; init; } @@ -46,7 +46,7 @@ public ChatCompletionRequestUserMessageContentPart(global::OpenAI.ChatCompletion } /// - /// + /// Learn about [image inputs](/docs/guides/vision). /// #if NET6_0_OR_GREATER public global::OpenAI.ChatCompletionRequestMessageContentPartImage? Image { get; init; } @@ -80,22 +80,60 @@ public ChatCompletionRequestUserMessageContentPart(global::OpenAI.ChatCompletion Image = value; } + /// + /// Learn about [audio inputs](/docs/guides/audio). + /// +#if NET6_0_OR_GREATER + public global::OpenAI.ChatCompletionRequestMessageContentPartAudio? Audio { get; init; } +#else + public global::OpenAI.ChatCompletionRequestMessageContentPartAudio? 
Audio { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Audio))] +#endif + public bool IsAudio => Audio != null; + + /// + /// + /// + public static implicit operator ChatCompletionRequestUserMessageContentPart(global::OpenAI.ChatCompletionRequestMessageContentPartAudio value) => new ChatCompletionRequestUserMessageContentPart(value); + + /// + /// + /// + public static implicit operator global::OpenAI.ChatCompletionRequestMessageContentPartAudio?(ChatCompletionRequestUserMessageContentPart @this) => @this.Audio; + + /// + /// + /// + public ChatCompletionRequestUserMessageContentPart(global::OpenAI.ChatCompletionRequestMessageContentPartAudio? value) + { + Audio = value; + } + /// /// /// public ChatCompletionRequestUserMessageContentPart( global::OpenAI.ChatCompletionRequestMessageContentPartText? text, - global::OpenAI.ChatCompletionRequestMessageContentPartImage? image + global::OpenAI.ChatCompletionRequestMessageContentPartImage? image, + global::OpenAI.ChatCompletionRequestMessageContentPartAudio? audio ) { Text = text; Image = image; + Audio = audio; } /// /// /// public object? Object => + Audio as object ?? Image as object ?? Text as object ; @@ -105,7 +143,7 @@ Text as object ///
public bool Validate() { - return IsText && !IsImage || !IsText && IsImage; + return IsText && !IsImage && !IsAudio || !IsText && IsImage && !IsAudio || !IsText && !IsImage && IsAudio; } /// @@ -114,6 +152,7 @@ public bool Validate() public TResult? Match( global::System.Func? text = null, global::System.Func? image = null, + global::System.Func? audio = null, bool validate = true) { if (validate) @@ -129,6 +168,10 @@ public bool Validate() { return image(Image!); } + else if (IsAudio && audio != null) + { + return audio(Audio!); + } return default(TResult); } @@ -139,6 +182,7 @@ public bool Validate() public void Match( global::System.Action? text = null, global::System.Action? image = null, + global::System.Action? audio = null, bool validate = true) { if (validate) @@ -154,6 +198,10 @@ public void Match( { image?.Invoke(Image!); } + else if (IsAudio) + { + audio?.Invoke(Audio!); + } } /// @@ -167,6 +215,8 @@ public override int GetHashCode() typeof(global::OpenAI.ChatCompletionRequestMessageContentPartText), Image, typeof(global::OpenAI.ChatCompletionRequestMessageContentPartImage), + Audio, + typeof(global::OpenAI.ChatCompletionRequestMessageContentPartAudio), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -183,7 +233,8 @@ public bool Equals(ChatCompletionRequestUserMessageContentPart other) { return global::System.Collections.Generic.EqualityComparer.Default.Equals(Text, other.Text) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Image, other.Image) + global::System.Collections.Generic.EqualityComparer.Default.Equals(Image, other.Image) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Audio, other.Audio) ; } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionResponseMessage.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionResponseMessage.g.cs index 77567715..48698dc3 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionResponseMessage.g.cs 
+++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionResponseMessage.g.cs @@ -40,6 +40,13 @@ public sealed partial class ChatCompletionResponseMessage [global::System.Obsolete("This property marked as deprecated.")] public global::OpenAI.ChatCompletionResponseMessageFunctionCall? FunctionCall { get; set; } + /// + /// If the audio output modality is requested, this object contains data
+ /// about the audio response from the model. [Learn more](/docs/guides/audio). + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public global::OpenAI.ChatCompletionResponseMessageAudio? Audio { get; set; } + /// /// Additional properties that are not explicitly defined in the schema /// diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionResponseMessageAudio.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionResponseMessageAudio.g.cs new file mode 100644 index 00000000..c0cc0b50 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ChatCompletionResponseMessageAudio.g.cs @@ -0,0 +1,108 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// If the audio output modality is requested, this object contains data
+ /// about the audio response from the model. [Learn more](/docs/guides/audio). + ///
+ public sealed partial class ChatCompletionResponseMessageAudio + { + /// + /// Unique identifier for this audio response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Id { get; set; } + + /// + /// The Unix timestamp (in seconds) for when this audio response will
+ /// no longer be accessible on the server for use in multi-turn
+ /// conversations. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("expires_at")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.UnixTimestampJsonConverter))] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.DateTimeOffset ExpiresAt { get; set; } + + /// + /// Base64 encoded audio bytes generated by the model, in the format
+ /// specified in the request. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("data")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Data { get; set; } + + /// + /// Transcript of the audio generated by the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.ChatCompletionResponseMessageAudio? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.ChatCompletionResponseMessageAudio), + jsonSerializerContext) as global::OpenAI.ChatCompletionResponseMessageAudio; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.ChatCompletionResponseMessageAudio? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsage.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsage.g.cs index e55bce4e..f9a065a2 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsage.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsage.g.cs @@ -35,6 +35,12 @@ public sealed partial class CompletionUsage [global::System.Text.Json.Serialization.JsonPropertyName("completion_tokens_details")] public global::OpenAI.CompletionUsageCompletionTokensDetails? CompletionTokensDetails { get; set; } + /// + /// Breakdown of tokens used in the prompt. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("prompt_tokens_details")] + public global::OpenAI.CompletionUsagePromptTokensDetails? PromptTokensDetails { get; set; } + /// /// Additional properties that are not explicitly defined in the schema /// diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsageCompletionTokensDetails.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsageCompletionTokensDetails.g.cs index e3352c81..e69909a4 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsageCompletionTokensDetails.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsageCompletionTokensDetails.g.cs @@ -8,6 +8,12 @@ namespace OpenAI ///
public sealed partial class CompletionUsageCompletionTokensDetails { + /// + /// Audio input tokens generated by the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio_tokens")] + public int? AudioTokens { get; set; } + /// /// Tokens generated by the model for reasoning. /// diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsagePromptTokensDetails.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsagePromptTokensDetails.g.cs new file mode 100644 index 00000000..25a777c6 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CompletionUsagePromptTokensDetails.g.cs @@ -0,0 +1,87 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Breakdown of tokens used in the prompt. + /// + public sealed partial class CompletionUsagePromptTokensDetails + { + /// + /// Audio input tokens present in the prompt. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio_tokens")] + public int? AudioTokens { get; set; } + + /// + /// Cached tokens present in the prompt. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("cached_tokens")] + public int? CachedTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.CompletionUsagePromptTokensDetails? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.CompletionUsagePromptTokensDetails), + jsonSerializerContext) as global::OpenAI.CompletionUsagePromptTokensDetails; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.CompletionUsagePromptTokensDetails? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateAssistantRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateAssistantRequest.g.cs index a9a3a4d2..73efe8c4 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateAssistantRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateAssistantRequest.g.cs @@ -56,7 +56,7 @@ public sealed partial class CreateAssistantRequest public object? Metadata { get; set; } /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 ///
@@ -64,7 +64,8 @@ public sealed partial class CreateAssistantRequest public double? Temperature { get; set; } = 1; /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 ///
diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequest.g.cs index 7d7f09fb..3c2867cb 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequest.g.cs @@ -11,7 +11,10 @@ namespace OpenAI public sealed partial class CreateChatCompletionRequest { /// - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. Depending on the
+ /// [model](/docs/models) you use, different message types (modalities) are
+ /// supported, like [text](/docs/guides/text-generation),
+ /// [images](/docs/guides/vision), and [audio](/docs/guides/audio). ///
[global::System.Text.Json.Serialization.JsonPropertyName("messages")] [global::System.Text.Json.Serialization.JsonRequired] @@ -26,6 +29,21 @@ public sealed partial class CreateChatCompletionRequest [global::System.Text.Json.Serialization.JsonRequired] public required global::OpenAI.AnyOf Model { get; set; } + /// + /// Whether or not to store the output of this chat completion request
+ /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Default Value: false + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("store")] + public bool? Store { get; set; } = false; + + /// + /// Developer-defined tags and values used for filtering completions
+ /// in the [dashboard](https://platform.openai.com/chat-completions). + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] + public global::System.Collections.Generic.Dictionary? Metadata { get; set; } + /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
/// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
@@ -76,6 +94,25 @@ public sealed partial class CreateChatCompletionRequest [global::System.Text.Json.Serialization.JsonPropertyName("n")] public int? N { get; set; } = 1; + /// + /// Output types that you would like the model to generate for this request.
+ /// Most models are capable of generating text, which is the default:
+ /// `["text"]`
+ /// The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To
+ /// request that this model generate both text and audio responses, you can
+ /// use:
+ /// `["text", "audio"]` + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] + public global::System.Collections.Generic.IList? Modalities { get; set; } + + /// + /// Parameters for audio output. Required when audio output is requested with
+ /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public global::OpenAI.CreateChatCompletionRequestAudio? Audio { get; set; } + /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
/// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
@@ -108,11 +145,12 @@ public sealed partial class CreateChatCompletionRequest /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - When not set, the default behavior is 'auto'.
- /// When this parameter is set, the response body will include the `service_tier` utilized. + /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// Default Value: auto ///
[global::System.Text.Json.Serialization.JsonPropertyName("service_tier")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestServiceTierJsonConverter))] - public global::OpenAI.CreateChatCompletionRequestServiceTier? ServiceTier { get; set; } + public global::OpenAI.CreateChatCompletionRequestServiceTier? ServiceTier { get; set; } = global::OpenAI.CreateChatCompletionRequestServiceTier.Auto; /// /// Up to 4 sequences where the API will stop generating further tokens. diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudio.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudio.g.cs new file mode 100644 index 00000000..38dd9795 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudio.g.cs @@ -0,0 +1,94 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Parameters for audio output. Required when audio output is requested with
+ /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + ///
+ public sealed partial class CreateChatCompletionRequestAudio + { + /// + /// Specifies the voice type. Supported voices are `alloy`, `echo`,
+ /// `fable`, `onyx`, `nova`, and `shimmer`. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("voice")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestAudioVoiceJsonConverter))] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.CreateChatCompletionRequestAudioVoice Voice { get; set; } + + /// + /// Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,
+ /// `opus`, or `pcm16`. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("format")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.CreateChatCompletionRequestAudioFormatJsonConverter))] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.CreateChatCompletionRequestAudioFormat Format { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. 
+ /// + public static global::OpenAI.CreateChatCompletionRequestAudio? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.CreateChatCompletionRequestAudio), + jsonSerializerContext) as global::OpenAI.CreateChatCompletionRequestAudio; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.CreateChatCompletionRequestAudio? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudioFormat.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudioFormat.g.cs new file mode 100644 index 00000000..a3ed1663 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudioFormat.g.cs @@ -0,0 +1,70 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,
+ /// `opus`, or `pcm16`. + ///
+ public enum CreateChatCompletionRequestAudioFormat + { + /// + /// + /// + Wav, + /// + /// + /// + Mp3, + /// + /// + /// + Flac, + /// + /// + /// + Opus, + /// + /// + /// + Pcm16, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateChatCompletionRequestAudioFormatExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateChatCompletionRequestAudioFormat value) + { + return value switch + { + CreateChatCompletionRequestAudioFormat.Wav => "wav", + CreateChatCompletionRequestAudioFormat.Mp3 => "mp3", + CreateChatCompletionRequestAudioFormat.Flac => "flac", + CreateChatCompletionRequestAudioFormat.Opus => "opus", + CreateChatCompletionRequestAudioFormat.Pcm16 => "pcm16", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateChatCompletionRequestAudioFormat? ToEnum(string value) + { + return value switch + { + "wav" => CreateChatCompletionRequestAudioFormat.Wav, + "mp3" => CreateChatCompletionRequestAudioFormat.Mp3, + "flac" => CreateChatCompletionRequestAudioFormat.Flac, + "opus" => CreateChatCompletionRequestAudioFormat.Opus, + "pcm16" => CreateChatCompletionRequestAudioFormat.Pcm16, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudioVoice.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudioVoice.g.cs new file mode 100644 index 00000000..d9849f2b --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestAudioVoice.g.cs @@ -0,0 +1,76 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Specifies the voice type. Supported voices are `alloy`, `echo`,
+ /// `fable`, `onyx`, `nova`, and `shimmer`. + ///
+ public enum CreateChatCompletionRequestAudioVoice + { + /// + /// + /// + Alloy, + /// + /// + /// + Echo, + /// + /// + /// + Fable, + /// + /// + /// + Onyx, + /// + /// + /// + Nova, + /// + /// + /// + Shimmer, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateChatCompletionRequestAudioVoiceExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateChatCompletionRequestAudioVoice value) + { + return value switch + { + CreateChatCompletionRequestAudioVoice.Alloy => "alloy", + CreateChatCompletionRequestAudioVoice.Echo => "echo", + CreateChatCompletionRequestAudioVoice.Fable => "fable", + CreateChatCompletionRequestAudioVoice.Onyx => "onyx", + CreateChatCompletionRequestAudioVoice.Nova => "nova", + CreateChatCompletionRequestAudioVoice.Shimmer => "shimmer", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateChatCompletionRequestAudioVoice? 
ToEnum(string value) + { + return value switch + { + "alloy" => CreateChatCompletionRequestAudioVoice.Alloy, + "echo" => CreateChatCompletionRequestAudioVoice.Echo, + "fable" => CreateChatCompletionRequestAudioVoice.Fable, + "onyx" => CreateChatCompletionRequestAudioVoice.Onyx, + "nova" => CreateChatCompletionRequestAudioVoice.Nova, + "shimmer" => CreateChatCompletionRequestAudioVoice.Shimmer, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestMetadata.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestMetadata.g.cs new file mode 100644 index 00000000..7b97b2ef --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestMetadata.g.cs @@ -0,0 +1,77 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Developer-defined tags and values used for filtering completions
+ /// in the [dashboard](https://platform.openai.com/chat-completions). + ///
+ public sealed partial class CreateChatCompletionRequestMetadata + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.CreateChatCompletionRequestMetadata? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.CreateChatCompletionRequestMetadata), + jsonSerializerContext) as global::OpenAI.CreateChatCompletionRequestMetadata; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.CreateChatCompletionRequestMetadata? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestModel.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestModel.g.cs index f531c8b4..b2e174c7 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestModel.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestModel.g.cs @@ -39,6 +39,22 @@ public enum CreateChatCompletionRequestModel /// /// /// + Gpt4oRealtimePreview, + /// + /// + /// + Gpt4oRealtimePreview20241001, + /// + /// + /// + Gpt4oAudioPreview, + /// + /// + /// + Gpt4oAudioPreview20241001, + /// + /// + /// Chatgpt4oLatest, /// /// @@ -145,6 +161,10 @@ public static string ToValueString(this CreateChatCompletionRequestModel value) CreateChatCompletionRequestModel.Gpt4o => "gpt-4o", CreateChatCompletionRequestModel.Gpt4o20240806 => "gpt-4o-2024-08-06", CreateChatCompletionRequestModel.Gpt4o20240513 => "gpt-4o-2024-05-13", + CreateChatCompletionRequestModel.Gpt4oRealtimePreview => "gpt-4o-realtime-preview", + CreateChatCompletionRequestModel.Gpt4oRealtimePreview20241001 => "gpt-4o-realtime-preview-2024-10-01", + CreateChatCompletionRequestModel.Gpt4oAudioPreview => "gpt-4o-audio-preview", + CreateChatCompletionRequestModel.Gpt4oAudioPreview20241001 => "gpt-4o-audio-preview-2024-10-01", CreateChatCompletionRequestModel.Chatgpt4oLatest => "chatgpt-4o-latest", CreateChatCompletionRequestModel.Gpt4oMini => "gpt-4o-mini", CreateChatCompletionRequestModel.Gpt4oMini20240718 => "gpt-4o-mini-2024-07-18", @@ -184,6 +204,10 @@ public static string ToValueString(this CreateChatCompletionRequestModel value) "gpt-4o" => CreateChatCompletionRequestModel.Gpt4o, "gpt-4o-2024-08-06" => CreateChatCompletionRequestModel.Gpt4o20240806, "gpt-4o-2024-05-13" => CreateChatCompletionRequestModel.Gpt4o20240513, + 
"gpt-4o-realtime-preview" => CreateChatCompletionRequestModel.Gpt4oRealtimePreview, + "gpt-4o-realtime-preview-2024-10-01" => CreateChatCompletionRequestModel.Gpt4oRealtimePreview20241001, + "gpt-4o-audio-preview" => CreateChatCompletionRequestModel.Gpt4oAudioPreview, + "gpt-4o-audio-preview-2024-10-01" => CreateChatCompletionRequestModel.Gpt4oAudioPreview20241001, "chatgpt-4o-latest" => CreateChatCompletionRequestModel.Chatgpt4oLatest, "gpt-4o-mini" => CreateChatCompletionRequestModel.Gpt4oMini, "gpt-4o-mini-2024-07-18" => CreateChatCompletionRequestModel.Gpt4oMini20240718, diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestServiceTier.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestServiceTier.g.cs index a85cd49c..e514fa1d 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestServiceTier.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateChatCompletionRequestServiceTier.g.cs @@ -9,7 +9,8 @@ namespace OpenAI /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - When not set, the default behavior is 'auto'.
- /// When this parameter is set, the response body will include the `service_tier` utilized. + /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// Default Value: auto ///
public enum CreateChatCompletionRequestServiceTier { diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateRunRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateRunRequest.g.cs index c8d6cc0c..d835598d 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateRunRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateRunRequest.g.cs @@ -56,7 +56,7 @@ public sealed partial class CreateRunRequest public object? Metadata { get; set; } /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 ///
@@ -64,7 +64,8 @@ public sealed partial class CreateRunRequest public double? Temperature { get; set; } = 1; /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 ///
diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateThreadAndRunRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateThreadAndRunRequest.g.cs index 68bba054..175d205c 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateThreadAndRunRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateThreadAndRunRequest.g.cs @@ -56,7 +56,7 @@ public sealed partial class CreateThreadAndRunRequest public object? Metadata { get; set; } /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 ///
@@ -64,7 +64,8 @@ public sealed partial class CreateThreadAndRunRequest public double? Temperature { get; set; } = 1; /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 ///
diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranscriptionResponseVerboseJson.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranscriptionResponseVerboseJson.g.cs index 21cba3c5..5d73ae46 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranscriptionResponseVerboseJson.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranscriptionResponseVerboseJson.g.cs @@ -20,7 +20,7 @@ public sealed partial class CreateTranscriptionResponseVerboseJson ///
[global::System.Text.Json.Serialization.JsonPropertyName("duration")] [global::System.Text.Json.Serialization.JsonRequired] - public required double Duration { get; set; } + public required string Duration { get; set; } /// /// The transcribed text. diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranslationResponseVerboseJson.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranslationResponseVerboseJson.g.cs index d3b7ec67..3e4cfdb5 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranslationResponseVerboseJson.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.CreateTranslationResponseVerboseJson.g.cs @@ -20,7 +20,7 @@ public sealed partial class CreateTranslationResponseVerboseJson /// [global::System.Text.Json.Serialization.JsonPropertyName("duration")] [global::System.Text.Json.Serialization.JsonRequired] - public required double Duration { get; set; } + public required string Duration { get; set; } /// /// The translated text. diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ListModelsResponse.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ListModelsResponse.g.cs index a8a31ae9..2a563f4d 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ListModelsResponse.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ListModelsResponse.g.cs @@ -20,7 +20,7 @@ public sealed partial class ListModelsResponse /// [global::System.Text.Json.Serialization.JsonPropertyName("data")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::System.Collections.Generic.IList Data { get; set; } + public required global::System.Collections.Generic.IList Data { get; set; } /// /// Additional properties that are not explicitly defined in the schema diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.Model12.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.Model15.g.cs similarity index 94% rename from src/libs/OpenAI/Generated/OpenAI.Models.Model12.g.cs rename to src/libs/OpenAI/Generated/OpenAI.Models.Model15.g.cs index 
8a0ff55c..a70cec8f 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.Model12.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.Model15.g.cs @@ -6,7 +6,7 @@ namespace OpenAI /// /// Describes an OpenAI model offering that can be used with the API. /// - public sealed partial class Model12 + public sealed partial class Model15 { /// /// The model identifier, which can be referenced in the API endpoints. @@ -74,14 +74,14 @@ public string ToJson( /// /// Deserializes a JSON string using the provided JsonSerializerContext. /// - public static global::OpenAI.Model12? FromJson( + public static global::OpenAI.Model15? FromJson( string json, global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) { return global::System.Text.Json.JsonSerializer.Deserialize( json, - typeof(global::OpenAI.Model12), - jsonSerializerContext) as global::OpenAI.Model12; + typeof(global::OpenAI.Model15), + jsonSerializerContext) as global::OpenAI.Model15; } /// @@ -91,11 +91,11 @@ public string ToJson( [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] #endif - public static global::OpenAI.Model12? FromJson( + public static global::OpenAI.Model15? FromJson( string json, global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) { - return global::System.Text.Json.JsonSerializer.Deserialize( + return global::System.Text.Json.JsonSerializer.Deserialize( json, jsonSerializerOptions); } diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ModifyAssistantRequest.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ModifyAssistantRequest.g.cs index a3683862..e52b3fdd 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ModifyAssistantRequest.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ModifyAssistantRequest.g.cs @@ -53,7 +53,7 @@ public sealed partial class ModifyAssistantRequest public object? Metadata { get; set; } /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 ///
@@ -61,7 +61,8 @@ public sealed partial class ModifyAssistantRequest public double? Temperature { get; set; } = 1; /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 ///
diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreate.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreate.g.cs new file mode 100644 index 00000000..23231f70 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreate.g.cs @@ -0,0 +1,101 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Send this event when adding an item to the conversation. + /// + public sealed partial class RealtimeClientEventConversationItemCreate + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be "conversation.item.create". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the preceding item after which the new item will be inserted. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("previous_item_id")] + public string? PreviousItemId { get; set; } + + /// + /// The item to add to the conversation. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeClientEventConversationItemCreateItem Item { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventConversationItemCreate? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventConversationItemCreate), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventConversationItemCreate; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventConversationItemCreate? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreateItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreateItem.g.cs new file mode 100644 index 00000000..3cee52ce --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreateItem.g.cs @@ -0,0 +1,129 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The item to add to the conversation. + /// + public sealed partial class RealtimeClientEventConversationItemCreateItem + { + /// + /// The unique ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + public string? Id { get; set; } + + /// + /// The type of the item ("message", "function_call", "function_call_output"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The status of the item ("completed", "in_progress", "incomplete"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("status")] + public string? Status { get; set; } + + /// + /// The role of the message sender ("user", "assistant", "system"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("role")] + public string? 
Role { get; set; } + + /// + /// The content of the message. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content")] + public global::System.Collections.Generic.IList? Content { get; set; } + + /// + /// The ID of the function call (for "function_call" items). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("call_id")] + public string? CallId { get; set; } + + /// + /// The name of the function being called (for "function_call" items). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("name")] + public string? Name { get; set; } + + /// + /// The arguments of the function call (for "function_call" items). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("arguments")] + public string? Arguments { get; set; } + + /// + /// The output of the function call (for "function_call_output" items). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output")] + public string? Output { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventConversationItemCreateItem? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventConversationItemCreateItem), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventConversationItemCreateItem; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventConversationItemCreateItem? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreateItemContentItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreateItemContentItem.g.cs new file mode 100644 index 00000000..b9065b39 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemCreateItemContentItem.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public sealed partial class RealtimeClientEventConversationItemCreateItemContentItem + { + /// + /// The content type ("input_text", "input_audio", "text", "audio"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The text content. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("text")] + public string? Text { get; set; } + + /// + /// Base64-encoded audio bytes. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public string? Audio { get; set; } + + /// + /// The transcript of the audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + public string? Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventConversationItemCreateItemContentItem? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventConversationItemCreateItemContentItem), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventConversationItemCreateItemContentItem; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventConversationItemCreateItemContentItem? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemDelete.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemDelete.g.cs new file mode 100644 index 00000000..ee6a7b52 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemDelete.g.cs @@ -0,0 +1,95 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Send this event when you want to remove any item from the conversation history. + /// + public sealed partial class RealtimeClientEventConversationItemDelete + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be "conversation.item.delete". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the item to delete. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventConversationItemDelete? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventConversationItemDelete), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventConversationItemDelete; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventConversationItemDelete? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemTruncate.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemTruncate.g.cs new file mode 100644 index 00000000..75abddbe --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventConversationItemTruncate.g.cs @@ -0,0 +1,109 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Send this event when you want to truncate a previous assistant message’s audio. + /// + public sealed partial class RealtimeClientEventConversationItemTruncate + { + /// + /// Optional client-generated ID used to identify this event. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be "conversation.item.truncate". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the assistant message item to truncate. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the content part to truncate. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// Inclusive duration up to which audio is truncated, in milliseconds. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio_end_ms")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int AudioEndMs { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventConversationItemTruncate? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventConversationItemTruncate), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventConversationItemTruncate; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventConversationItemTruncate? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferAppend.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferAppend.g.cs new file mode 100644 index 00000000..f19e8ca2 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferAppend.g.cs @@ -0,0 +1,95 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Send this event to append audio bytes to the input audio buffer. + /// + public sealed partial class RealtimeClientEventInputAudioBufferAppend + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be "input_audio_buffer.append". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// Base64-encoded audio bytes. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Audio { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventInputAudioBufferAppend? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventInputAudioBufferAppend), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventInputAudioBufferAppend; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventInputAudioBufferAppend? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferClear.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferClear.g.cs new file mode 100644 index 00000000..004e2946 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferClear.g.cs @@ -0,0 +1,88 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Send this event to clear the audio bytes in the buffer. + /// + public sealed partial class RealtimeClientEventInputAudioBufferClear + { + /// + /// Optional client-generated ID used to identify this event. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be "input_audio_buffer.clear". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. 
+ /// + public static global::OpenAI.RealtimeClientEventInputAudioBufferClear? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventInputAudioBufferClear), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventInputAudioBufferClear; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventInputAudioBufferClear? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferCommit.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferCommit.g.cs new file mode 100644 index 00000000..b2303e55 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventInputAudioBufferCommit.g.cs @@ -0,0 +1,88 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Send this event to commit audio bytes to a user message. 
+ /// + public sealed partial class RealtimeClientEventInputAudioBufferCommit + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be "input_audio_buffer.commit". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventInputAudioBufferCommit? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventInputAudioBufferCommit), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventInputAudioBufferCommit; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventInputAudioBufferCommit? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCancel.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCancel.g.cs new file mode 100644 index 00000000..771a4840 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCancel.g.cs @@ -0,0 +1,88 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Send this event to cancel an in-progress response. + /// + public sealed partial class RealtimeClientEventResponseCancel + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be "response.cancel". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventResponseCancel? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventResponseCancel), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventResponseCancel; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventResponseCancel? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreate.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreate.g.cs new file mode 100644 index 00000000..af446d42 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreate.g.cs @@ -0,0 +1,95 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Send this event to trigger a response generation. + /// + public sealed partial class RealtimeClientEventResponseCreate + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be "response.create". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// Configuration for the response. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("response")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeClientEventResponseCreateResponse Response { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventResponseCreate? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventResponseCreate), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventResponseCreate; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventResponseCreate? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponse.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponse.g.cs new file mode 100644 index 00000000..7b09cff7 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponse.g.cs @@ -0,0 +1,126 @@ + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace OpenAI +{ + /// + /// Configuration for the response. + /// + public sealed partial class RealtimeClientEventResponseCreateResponse + { + /// + /// The modalities for the response. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] + public global::System.Collections.Generic.IList? Modalities { get; set; } + + /// + /// Instructions for the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("instructions")] + public string? Instructions { get; set; } + + /// + /// The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("voice")] + public string? Voice { get; set; } + + /// + /// The format of output audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_audio_format")] + public string? OutputAudioFormat { get; set; } + + /// + /// Tools (functions) available to the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("tools")] + public global::System.Collections.Generic.IList? Tools { get; set; } + + /// + /// How the model chooses tools. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("tool_choice")] + public string? ToolChoice { get; set; } + + /// + /// Sampling temperature. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("temperature")] + public double? Temperature { get; set; } + + /// + /// Maximum number of output tokens for a single assistant response, inclusive of tool calls. Provide an integer between 1 and 4096 to limit output tokens, or "inf" for the maximum available tokens for a given model. Defaults to "inf". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_output_tokens")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory2))] + public global::OpenAI.OneOf? 
MaxOutputTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventResponseCreateResponse? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventResponseCreateResponse), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventResponseCreateResponse; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventResponseCreateResponse? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseMaxOutputTokens.g.cs new file mode 100644 index 00000000..5afb18f4 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseMaxOutputTokens.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum RealtimeClientEventResponseCreateResponseMaxOutputTokens + { + /// + /// + /// + Inf, + } + + /// + /// Enum extensions to do fast conversions without the reflection. 
+ /// + public static class RealtimeClientEventResponseCreateResponseMaxOutputTokensExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventResponseCreateResponseMaxOutputTokens value) + { + return value switch + { + RealtimeClientEventResponseCreateResponseMaxOutputTokens.Inf => "inf", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventResponseCreateResponseMaxOutputTokens? ToEnum(string value) + { + return value switch + { + "inf" => RealtimeClientEventResponseCreateResponseMaxOutputTokens.Inf, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseTool.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseTool.g.cs new file mode 100644 index 00000000..305ec438 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseTool.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public sealed partial class RealtimeClientEventResponseCreateResponseTool + { + /// + /// The type of the tool. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The name of the function. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("name")] + public string? Name { get; set; } + + /// + /// The description of the function. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("description")] + public string? Description { get; set; } + + /// + /// Parameters of the function in JSON Schema. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("parameters")] + public object? 
Parameters { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventResponseCreateResponseTool? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventResponseCreateResponseTool), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventResponseCreateResponseTool; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventResponseCreateResponseTool? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseToolParameters.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseToolParameters.g.cs new file mode 100644 index 00000000..220e1347 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventResponseCreateResponseToolParameters.g.cs @@ -0,0 +1,76 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Parameters of the function in JSON Schema. 
+ /// + public sealed partial class RealtimeClientEventResponseCreateResponseToolParameters + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventResponseCreateResponseToolParameters? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventResponseCreateResponseToolParameters), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventResponseCreateResponseToolParameters; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventResponseCreateResponseToolParameters? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdate.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdate.g.cs new file mode 100644 index 00000000..188f8513 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdate.g.cs @@ -0,0 +1,95 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Send this event to update the session’s default configuration. + /// + public sealed partial class RealtimeClientEventSessionUpdate + { + /// + /// Optional client-generated ID used to identify this event. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be "session.update". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// Session configuration to update. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("session")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeClientEventSessionUpdateSession Session { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventSessionUpdate? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventSessionUpdate), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventSessionUpdate; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventSessionUpdate? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSession.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSession.g.cs new file mode 100644 index 00000000..6a376c15 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSession.g.cs @@ -0,0 +1,144 @@ + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace OpenAI +{ + /// + /// Session configuration to update. + /// + public sealed partial class RealtimeClientEventSessionUpdateSession + { + /// + /// The set of modalities the model can respond with. To disable audio, set this to ["text"]. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] + public global::System.Collections.Generic.IList? Modalities { get; set; } + + /// + /// The default system instructions prepended to model calls. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("instructions")] + public string? Instructions { get; set; } + + /// + /// The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`. Cannot be changed once the model has responded with audio at least once. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("voice")] + public string? Voice { get; set; } + + /// + /// The format of input audio. Options are "pcm16", "g711_ulaw", or "g711_alaw". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_format")] + public string? InputAudioFormat { get; set; } + + /// + /// The format of output audio. Options are "pcm16", "g711_ulaw", or "g711_alaw". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_audio_format")] + public string? 
OutputAudioFormat { get; set; } + + /// + /// Configuration for input audio transcription. Can be set to `null` to turn off. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_transcription")] + public global::OpenAI.RealtimeClientEventSessionUpdateSessionInputAudioTranscription? InputAudioTranscription { get; set; } + + /// + /// Configuration for turn detection. Can be set to `null` to turn off. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("turn_detection")] + public global::OpenAI.RealtimeClientEventSessionUpdateSessionTurnDetection? TurnDetection { get; set; } + + /// + /// Tools (functions) available to the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("tools")] + public global::System.Collections.Generic.IList? Tools { get; set; } + + /// + /// How the model chooses tools. Options are "auto", "none", "required", or specify a function. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("tool_choice")] + public string? ToolChoice { get; set; } + + /// + /// Sampling temperature for the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("temperature")] + public double? Temperature { get; set; } + + /// + /// Maximum number of output tokens for a single assistant response, inclusive of tool calls. Provide an integer between 1 and 4096 to limit output tokens, or "inf" for the maximum available tokens for a given model. Defaults to "inf". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_output_tokens")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory2))] + public global::OpenAI.OneOf? 
MaxOutputTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventSessionUpdateSession? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventSessionUpdateSession), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventSessionUpdateSession; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventSessionUpdateSession? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionInputAudioTranscription.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionInputAudioTranscription.g.cs new file mode 100644 index 00000000..92dcfcde --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionInputAudioTranscription.g.cs @@ -0,0 +1,81 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Configuration for input audio transcription. Can be set to `null` to turn off. 
+ /// + public sealed partial class RealtimeClientEventSessionUpdateSessionInputAudioTranscription + { + /// + /// The model to use for transcription (e.g., "whisper-1"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. 
+ /// + public static global::OpenAI.RealtimeClientEventSessionUpdateSessionInputAudioTranscription? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventSessionUpdateSessionInputAudioTranscription), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventSessionUpdateSessionInputAudioTranscription; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventSessionUpdateSessionInputAudioTranscription? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionMaxOutputTokens.g.cs new file mode 100644 index 00000000..53cdb193 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionMaxOutputTokens.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum RealtimeClientEventSessionUpdateSessionMaxOutputTokens + { + /// + /// + /// + Inf, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventSessionUpdateSessionMaxOutputTokensExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventSessionUpdateSessionMaxOutputTokens value) + { + return value switch + { + RealtimeClientEventSessionUpdateSessionMaxOutputTokens.Inf => "inf", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventSessionUpdateSessionMaxOutputTokens? 
ToEnum(string value) + { + return value switch + { + "inf" => RealtimeClientEventSessionUpdateSessionMaxOutputTokens.Inf, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionTool.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionTool.g.cs new file mode 100644 index 00000000..f8924078 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionTool.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public sealed partial class RealtimeClientEventSessionUpdateSessionTool + { + /// + /// The type of the tool, e.g., "function". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The name of the function. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("name")] + public string? Name { get; set; } + + /// + /// The description of the function. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("description")] + public string? Description { get; set; } + + /// + /// Parameters of the function in JSON Schema. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("parameters")] + public object? Parameters { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventSessionUpdateSessionTool? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventSessionUpdateSessionTool), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventSessionUpdateSessionTool; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventSessionUpdateSessionTool? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionToolParameters.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionToolParameters.g.cs new file mode 100644 index 00000000..ab4dcaf2 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionToolParameters.g.cs @@ -0,0 +1,76 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Parameters of the function in JSON Schema. + /// + public sealed partial class RealtimeClientEventSessionUpdateSessionToolParameters + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventSessionUpdateSessionToolParameters? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventSessionUpdateSessionToolParameters), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventSessionUpdateSessionToolParameters; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventSessionUpdateSessionToolParameters? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionTurnDetection.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionTurnDetection.g.cs new file mode 100644 index 00000000..a47fc6f0 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeClientEventSessionUpdateSessionTurnDetection.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Configuration for turn detection. Can be set to `null` to turn off. + /// + public sealed partial class RealtimeClientEventSessionUpdateSessionTurnDetection + { + /// + /// Type of turn detection, only "server_vad" is currently supported. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// Activation threshold for VAD (0.0 to 1.0). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("threshold")] + public double? Threshold { get; set; } + + /// + /// Amount of audio to include before speech starts (in milliseconds). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("prefix_padding_ms")] + public int? PrefixPaddingMs { get; set; } + + /// + /// Duration of silence to detect speech stop (in milliseconds). 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("silence_duration_ms")] + public int? SilenceDurationMs { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeClientEventSessionUpdateSessionTurnDetection? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeClientEventSessionUpdateSessionTurnDetection), + jsonSerializerContext) as global::OpenAI.RealtimeClientEventSessionUpdateSessionTurnDetection; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeClientEventSessionUpdateSessionTurnDetection? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationCreated.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationCreated.g.cs new file mode 100644 index 00000000..aba06cd0 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationCreated.g.cs @@ -0,0 +1,96 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when a conversation is created. Emitted right after session creation. + /// + public sealed partial class RealtimeServerEventConversationCreated + { + /// + /// The unique ID of the server event. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "conversation.created". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The conversation resource. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("conversation")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeServerEventConversationCreatedConversation Conversation { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventConversationCreated? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventConversationCreated), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventConversationCreated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventConversationCreated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationCreatedConversation.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationCreatedConversation.g.cs new file mode 100644 index 00000000..148f9eb9 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationCreatedConversation.g.cs @@ -0,0 +1,87 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The conversation resource. + /// + public sealed partial class RealtimeServerEventConversationCreatedConversation + { + /// + /// The unique ID of the conversation. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + public string? Id { get; set; } + + /// + /// The object type, must be "realtime.conversation". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + public string? Object { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventConversationCreatedConversation? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventConversationCreatedConversation), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventConversationCreatedConversation; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventConversationCreatedConversation? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreated.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreated.g.cs new file mode 100644 index 00000000..240ac89f --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreated.g.cs @@ -0,0 +1,103 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when a conversation item is created. + /// + public sealed partial class RealtimeServerEventConversationItemCreated + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "conversation.item.created". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the preceding item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("previous_item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string PreviousItemId { get; set; } + + /// + /// The item that was created. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("item")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeServerEventConversationItemCreatedItem Item { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventConversationItemCreated? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventConversationItemCreated), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventConversationItemCreated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventConversationItemCreated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreatedItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreatedItem.g.cs new file mode 100644 index 00000000..d9cd543e --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreatedItem.g.cs @@ -0,0 +1,135 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The item that was created. + /// + public sealed partial class RealtimeServerEventConversationItemCreatedItem + { + /// + /// The unique ID of the item. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + public string? Id { get; set; } + + /// + /// The object type, must be "realtime.item". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + public string? Object { get; set; } + + /// + /// The type of the item ("message", "function_call", "function_call_output"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The status of the item ("completed", "in_progress", "incomplete"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("status")] + public string? Status { get; set; } + + /// + /// The role associated with the item ("user", "assistant", "system"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("role")] + public string? Role { get; set; } + + /// + /// The content of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content")] + public global::System.Collections.Generic.IList? Content { get; set; } + + /// + /// The ID of the function call (for "function_call" items). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("call_id")] + public string? CallId { get; set; } + + /// + /// The name of the function being called. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("name")] + public string? Name { get; set; } + + /// + /// The arguments of the function call. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("arguments")] + public string? Arguments { get; set; } + + /// + /// The output of the function call (for "function_call_output" items). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output")] + public string? 
Output { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventConversationItemCreatedItem? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventConversationItemCreatedItem), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventConversationItemCreatedItem; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventConversationItemCreatedItem? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreatedItemContentItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreatedItemContentItem.g.cs new file mode 100644 index 00000000..da57a943 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemCreatedItemContentItem.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public sealed partial class RealtimeServerEventConversationItemCreatedItemContentItem + { + /// + /// The content type ("text", "audio", "input_text", "input_audio"). 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The text content. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("text")] + public string? Text { get; set; } + + /// + /// Base64-encoded audio data. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public string? Audio { get; set; } + + /// + /// The transcript of the audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + public string? Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventConversationItemCreatedItemContentItem? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventConversationItemCreatedItemContentItem), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventConversationItemCreatedItemContentItem; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventConversationItemCreatedItemContentItem? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemDeleted.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemDeleted.g.cs new file mode 100644 index 00000000..3e2114c4 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemDeleted.g.cs @@ -0,0 +1,96 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when an item in the conversation is deleted. + /// + public sealed partial class RealtimeServerEventConversationItemDeleted + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "conversation.item.deleted". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the item that was deleted. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventConversationItemDeleted? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventConversationItemDeleted), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventConversationItemDeleted; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventConversationItemDeleted? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.cs new file mode 100644 index 00000000..7c1a6e87 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.cs @@ -0,0 +1,110 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when input audio transcription is enabled and a transcription succeeds. + /// + public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionCompleted + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "conversation.item.input_audio_transcription.completed". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the user message item. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the content part containing the audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The transcribed text. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.cs new file mode 100644 index 00000000..6f792baf --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.cs @@ -0,0 +1,110 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when input audio transcription is configured, and a transcription request for a user message failed. + /// + public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionFailed + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "conversation.item.input_audio_transcription.failed". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the user message item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the content part containing the audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// Details of the transcription error. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("error")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError Error { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailed? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailed), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailed; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailed? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.g.cs new file mode 100644 index 00000000..7730b202 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Details of the transcription error. 
+ /// + public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionFailedError + { + /// + /// The type of error. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// Error code, if any. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("code")] + public string? Code { get; set; } + + /// + /// A human-readable error message. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("message")] + public string? Message { get; set; } + + /// + /// Parameter related to the error, if any. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("param")] + public string? Param { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemTruncated.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemTruncated.g.cs new file mode 100644 index 00000000..979928a1 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventConversationItemTruncated.g.cs @@ -0,0 +1,110 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when an earlier assistant audio message item is truncated by the client. + /// + public sealed partial class RealtimeServerEventConversationItemTruncated + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "conversation.item.truncated". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the assistant message item that was truncated. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the content part that was truncated. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The duration up to which the audio was truncated, in milliseconds. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio_end_ms")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int AudioEndMs { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventConversationItemTruncated? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventConversationItemTruncated), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventConversationItemTruncated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventConversationItemTruncated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventError.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventError.g.cs new file mode 100644 index 00000000..414906dd --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventError.g.cs @@ -0,0 +1,96 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when an error occurs. + /// + public sealed partial class RealtimeServerEventError + { + /// + /// The unique ID of the server event. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "error". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// Details of the error. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("error")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeServerEventErrorError Error { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventError? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventError), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventError; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventError? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventErrorError.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventErrorError.g.cs new file mode 100644 index 00000000..c92c8580 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventErrorError.g.cs @@ -0,0 +1,105 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Details of the error. + /// + public sealed partial class RealtimeServerEventErrorError + { + /// + /// The type of error (e.g., "invalid_request_error", "server_error"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// Error code, if any. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("code")] + public string? Code { get; set; } + + /// + /// A human-readable error message. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("message")] + public string? Message { get; set; } + + /// + /// Parameter related to the error, if any. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("param")] + public string? Param { get; set; } + + /// + /// The event_id of the client event that caused the error, if applicable. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventErrorError? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventErrorError), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventErrorError; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventErrorError? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferCleared.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferCleared.g.cs new file mode 100644 index 00000000..b68928cf --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferCleared.g.cs @@ -0,0 +1,89 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when the input audio buffer is cleared by the client. + /// + public sealed partial class RealtimeServerEventInputAudioBufferCleared + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "input_audio_buffer.cleared". 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventInputAudioBufferCleared? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventInputAudioBufferCleared), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventInputAudioBufferCleared; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventInputAudioBufferCleared? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferCommitted.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferCommitted.g.cs new file mode 100644 index 00000000..cff9ee9c --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferCommitted.g.cs @@ -0,0 +1,103 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when an input audio buffer is committed, either by the client or automatically in server VAD mode. 
+ /// + public sealed partial class RealtimeServerEventInputAudioBufferCommitted + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "input_audio_buffer.committed". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the preceding item after which the new item will be inserted. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("previous_item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string PreviousItemId { get; set; } + + /// + /// The ID of the user message item that will be created. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventInputAudioBufferCommitted? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventInputAudioBufferCommitted), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventInputAudioBufferCommitted; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventInputAudioBufferCommitted? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.cs new file mode 100644 index 00000000..b573d5e4 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.cs @@ -0,0 +1,103 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned in server turn detection mode when speech is detected. + /// + public sealed partial class RealtimeServerEventInputAudioBufferSpeechStarted + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "input_audio_buffer.speech_started". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// Milliseconds since the session started when speech was detected. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio_start_ms")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int AudioStartMs { get; set; } + + /// + /// The ID of the user message item that will be created when speech stops. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStarted? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStarted), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStarted; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStarted? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.cs new file mode 100644 index 00000000..ecdac70b --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.cs @@ -0,0 +1,103 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned in server turn detection mode when speech stops. + /// + public sealed partial class RealtimeServerEventInputAudioBufferSpeechStopped + { + /// + /// The unique ID of the server event. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "input_audio_buffer.speech_stopped". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// Milliseconds since the session started when speech stopped. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio_end_ms")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int AudioEndMs { get; set; } + + /// + /// The ID of the user message item that will be created. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStopped? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStopped), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStopped; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventInputAudioBufferSpeechStopped? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdated.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdated.g.cs new file mode 100644 index 00000000..ad6c536c --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdated.g.cs @@ -0,0 +1,96 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Emitted after every "response.done" event to indicate the updated rate limits. + /// + public sealed partial class RealtimeServerEventRateLimitsUpdated + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "rate_limits.updated". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// List of rate limit information. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("rate_limits")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList RateLimits { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventRateLimitsUpdated? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventRateLimitsUpdated), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventRateLimitsUpdated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventRateLimitsUpdated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.cs new file mode 100644 index 00000000..a0898d0f --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public sealed partial class RealtimeServerEventRateLimitsUpdatedRateLimit + { + /// + /// The name of the rate limit ("requests", "tokens", "input_tokens", "output_tokens"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("name")] + public string? Name { get; set; } + + /// + /// The maximum allowed value for the rate limit. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("limit")] + public int? Limit { get; set; } + + /// + /// The remaining value before the limit is reached. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("remaining")] + public int? Remaining { get; set; } + + /// + /// Seconds until the rate limit resets. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("reset_seconds")] + public double? 
ResetSeconds { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimit? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimit), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimit; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventRateLimitsUpdatedRateLimit? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseAudioDelta.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseAudioDelta.g.cs new file mode 100644 index 00000000..781fc670 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseAudioDelta.g.cs @@ -0,0 +1,124 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when the model-generated audio is updated. + /// + public sealed partial class RealtimeServerEventResponseAudioDelta + { + /// + /// The unique ID of the server event. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "response.audio.delta". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// Base64-encoded audio data delta. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("delta")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Delta { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseAudioDelta? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseAudioDelta), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseAudioDelta; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseAudioDelta? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseAudioDone.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseAudioDone.g.cs new file mode 100644 index 00000000..8320df39 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseAudioDone.g.cs @@ -0,0 +1,117 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when the model-generated audio is done. Also emitted when a Response is interrupted, incomplete, or cancelled. + /// + public sealed partial class RealtimeServerEventResponseAudioDone + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "response.audio.done". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the response. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseAudioDone? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseAudioDone), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseAudioDone; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseAudioDone? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseAudioTranscriptDelta.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseAudioTranscriptDelta.g.cs new file mode 100644 index 00000000..b40e9d0e --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseAudioTranscriptDelta.g.cs @@ -0,0 +1,124 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when the model-generated transcription of audio output is updated. + /// + public sealed partial class RealtimeServerEventResponseAudioTranscriptDelta + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "response.audio_transcript.delta". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The transcript delta. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("delta")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Delta { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseAudioTranscriptDelta? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseAudioTranscriptDelta), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseAudioTranscriptDelta; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseAudioTranscriptDelta? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseAudioTranscriptDone.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseAudioTranscriptDone.g.cs new file mode 100644 index 00000000..1a06ff35 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseAudioTranscriptDone.g.cs @@ -0,0 +1,124 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when the model-generated transcription of audio output is done streaming. Also emitted when a Response is interrupted, incomplete, or cancelled. + /// + public sealed partial class RealtimeServerEventResponseAudioTranscriptDone + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "response.audio_transcript.done". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The final transcript of the audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseAudioTranscriptDone? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseAudioTranscriptDone), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseAudioTranscriptDone; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseAudioTranscriptDone? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartAdded.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartAdded.g.cs new file mode 100644 index 00000000..9df1ac9c --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartAdded.g.cs @@ -0,0 +1,124 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when a new content part is added to an assistant message item during response generation. + /// + public sealed partial class RealtimeServerEventResponseContentPartAdded + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "response.content_part.added". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item to which the content part was added. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The content part that was added. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("part")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeServerEventResponseContentPartAddedPart Part { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseContentPartAdded? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseContentPartAdded), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseContentPartAdded; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseContentPartAdded? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartAddedPart.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartAddedPart.g.cs new file mode 100644 index 00000000..4bcc2d25 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartAddedPart.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The content part that was added. + /// + public sealed partial class RealtimeServerEventResponseContentPartAddedPart + { + /// + /// The content type ("text", "audio"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The text content (if type is "text"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("text")] + public string? Text { get; set; } + + /// + /// Base64-encoded audio data (if type is "audio"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public string? Audio { get; set; } + + /// + /// The transcript of the audio (if type is "audio"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + public string? Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseContentPartAddedPart? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseContentPartAddedPart), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseContentPartAddedPart; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseContentPartAddedPart? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartDone.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartDone.g.cs new file mode 100644 index 00000000..8a43b866 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartDone.g.cs @@ -0,0 +1,124 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when a content part is done streaming in an assistant message item. Also emitted when a Response is interrupted, incomplete, or cancelled. + /// + public sealed partial class RealtimeServerEventResponseContentPartDone + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "response.content_part.done". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the response. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The content part that is done. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("part")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeServerEventResponseContentPartDonePart Part { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseContentPartDone? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseContentPartDone), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseContentPartDone; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseContentPartDone? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartDonePart.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartDonePart.g.cs new file mode 100644 index 00000000..8987bafe --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseContentPartDonePart.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The content part that is done. + /// + public sealed partial class RealtimeServerEventResponseContentPartDonePart + { + /// + /// The content type ("text", "audio"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The text content (if type is "text"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("text")] + public string? Text { get; set; } + + /// + /// Base64-encoded audio data (if type is "audio"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public string? Audio { get; set; } + + /// + /// The transcript of the audio (if type is "audio"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + public string? 
Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseContentPartDonePart? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseContentPartDonePart), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseContentPartDonePart; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseContentPartDonePart? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreated.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreated.g.cs new file mode 100644 index 00000000..75cf59d5 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreated.g.cs @@ -0,0 +1,96 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when a new Response is created. The first event of response creation, where the response is in an initial state of "in_progress". 
+ /// + public sealed partial class RealtimeServerEventResponseCreated + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "response.created". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The response resource. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeServerEventResponseCreatedResponse Response { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseCreated? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseCreated), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseCreated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseCreated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponse.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponse.g.cs new file mode 100644 index 00000000..c1937839 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponse.g.cs @@ -0,0 +1,111 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The response resource. + /// + public sealed partial class RealtimeServerEventResponseCreatedResponse + { + /// + /// The unique ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + public string? Id { get; set; } + + /// + /// The object type, must be "realtime.response". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + public string? Object { get; set; } + + /// + /// The status of the response ("in_progress"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("status")] + public string? Status { get; set; } + + /// + /// Additional details about the status. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("status_details")] + public object? StatusDetails { get; set; } + + /// + /// The list of output items generated by the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output")] + public global::System.Collections.Generic.IList? Output { get; set; } + + /// + /// Usage statistics for the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("usage")] + public object? 
Usage { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseCreatedResponse? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseCreatedResponse), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseCreatedResponse; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseCreatedResponse? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseOutputItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseOutputItem.g.cs new file mode 100644 index 00000000..580154ac --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseOutputItem.g.cs @@ -0,0 +1,76 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// An item in the response output. 
+ /// + public sealed partial class RealtimeServerEventResponseCreatedResponseOutputItem + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseCreatedResponseOutputItem? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseCreatedResponseOutputItem), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseCreatedResponseOutputItem; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseCreatedResponseOutputItem? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseStatusDetails.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseStatusDetails.g.cs new file mode 100644 index 00000000..ec7ec927 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseStatusDetails.g.cs @@ -0,0 +1,76 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Additional details about the status. 
+ /// + public sealed partial class RealtimeServerEventResponseCreatedResponseStatusDetails + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseCreatedResponseStatusDetails? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseCreatedResponseStatusDetails), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseCreatedResponseStatusDetails; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseCreatedResponseStatusDetails? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseUsage.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseUsage.g.cs new file mode 100644 index 00000000..6bb8a68c --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseCreatedResponseUsage.g.cs @@ -0,0 +1,76 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Usage statistics for the response. 
+ /// + public sealed partial class RealtimeServerEventResponseCreatedResponseUsage + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseCreatedResponseUsage? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseCreatedResponseUsage), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseCreatedResponseUsage; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseCreatedResponseUsage? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDone.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDone.g.cs new file mode 100644 index 00000000..fdbebe01 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDone.g.cs @@ -0,0 +1,96 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when a Response is done streaming. Always emitted, no matter the final state. + /// + public sealed partial class RealtimeServerEventResponseDone + { + /// + /// The unique ID of the server event. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "response.done". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The response resource. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeServerEventResponseDoneResponse Response { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseDone? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseDone), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseDone; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseDone? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponse.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponse.g.cs new file mode 100644 index 00000000..3079da20 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponse.g.cs @@ -0,0 +1,111 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The response resource. + /// + public sealed partial class RealtimeServerEventResponseDoneResponse + { + /// + /// The unique ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + public string? Id { get; set; } + + /// + /// The object type, must be "realtime.response". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + public string? Object { get; set; } + + /// + /// The final status of the response ("completed", "cancelled", "failed", "incomplete"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("status")] + public string? Status { get; set; } + + /// + /// Additional details about the status. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("status_details")] + public object? StatusDetails { get; set; } + + /// + /// The list of output items generated by the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output")] + public global::System.Collections.Generic.IList? Output { get; set; } + + /// + /// Usage statistics for the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("usage")] + public object? 
Usage { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseDoneResponse? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseDoneResponse), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseDoneResponse; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseDoneResponse? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseOutputItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseOutputItem.g.cs new file mode 100644 index 00000000..26f64b70 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseOutputItem.g.cs @@ -0,0 +1,76 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// An item in the response output. 
+ /// + public sealed partial class RealtimeServerEventResponseDoneResponseOutputItem + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseDoneResponseOutputItem? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseDoneResponseOutputItem), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseDoneResponseOutputItem; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseDoneResponseOutputItem? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseStatusDetails.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseStatusDetails.g.cs new file mode 100644 index 00000000..c89e8715 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseStatusDetails.g.cs @@ -0,0 +1,76 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Additional details about the status. 
+ /// + public sealed partial class RealtimeServerEventResponseDoneResponseStatusDetails + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseDoneResponseStatusDetails? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseDoneResponseStatusDetails), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseDoneResponseStatusDetails; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseDoneResponseStatusDetails? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseUsage.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseUsage.g.cs new file mode 100644 index 00000000..dca3c469 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseDoneResponseUsage.g.cs @@ -0,0 +1,76 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Usage statistics for the response. 
+ /// + public sealed partial class RealtimeServerEventResponseDoneResponseUsage + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseDoneResponseUsage? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseDoneResponseUsage), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseDoneResponseUsage; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseDoneResponseUsage? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.g.cs new file mode 100644 index 00000000..35793c47 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.g.cs @@ -0,0 +1,124 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when the model-generated function call arguments are updated. 
+ /// + public sealed partial class RealtimeServerEventResponseFunctionCallArgumentsDelta + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "response.function_call_arguments.delta". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the function call item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The ID of the function call. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("call_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string CallId { get; set; } + + /// + /// The arguments delta as a JSON string. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("delta")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Delta { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDelta? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDelta), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDelta; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDelta? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.g.cs new file mode 100644 index 00000000..ab1abb7f --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.g.cs @@ -0,0 +1,124 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when the model-generated function call arguments are done streaming. Also emitted when a Response is interrupted, incomplete, or cancelled. 
+ /// + public sealed partial class RealtimeServerEventResponseFunctionCallArgumentsDone + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "response.function_call_arguments.done". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the function call item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The ID of the function call. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("call_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string CallId { get; set; } + + /// + /// The final arguments as a JSON string. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("arguments")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Arguments { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDone? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDone), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDone; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseFunctionCallArgumentsDone? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAdded.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAdded.g.cs new file mode 100644 index 00000000..72d21ffb --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAdded.g.cs @@ -0,0 +1,110 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when a new Item is created during response generation. + /// + public sealed partial class RealtimeServerEventResponseOutputItemAdded + { + /// + /// The unique ID of the server event. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "response.output_item.added". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the response to which the item belongs. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The item that was added. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeServerEventResponseOutputItemAddedItem Item { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseOutputItemAdded? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseOutputItemAdded), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseOutputItemAdded; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseOutputItemAdded? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAddedItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAddedItem.g.cs new file mode 100644 index 00000000..2277f59a --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAddedItem.g.cs @@ -0,0 +1,111 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The item that was added. + /// + public sealed partial class RealtimeServerEventResponseOutputItemAddedItem + { + /// + /// The unique ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + public string? Id { get; set; } + + /// + /// The object type, must be "realtime.item". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + public string? Object { get; set; } + + /// + /// The type of the item ("message", "function_call", "function_call_output"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The status of the item ("in_progress", "completed"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("status")] + public string? Status { get; set; } + + /// + /// The role associated with the item ("assistant"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("role")] + public string? Role { get; set; } + + /// + /// The content of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content")] + public global::System.Collections.Generic.IList? 
Content { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseOutputItemAddedItem? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseOutputItemAddedItem), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseOutputItemAddedItem; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseOutputItemAddedItem? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAddedItemContentItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAddedItemContentItem.g.cs new file mode 100644 index 00000000..099c74d3 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemAddedItemContentItem.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public sealed partial class RealtimeServerEventResponseOutputItemAddedItemContentItem + { + /// + /// The content type ("text", "audio"). 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The text content. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("text")] + public string? Text { get; set; } + + /// + /// Base64-encoded audio data. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public string? Audio { get; set; } + + /// + /// The transcript of the audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + public string? Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseOutputItemAddedItemContentItem? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseOutputItemAddedItemContentItem), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseOutputItemAddedItemContentItem; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseOutputItemAddedItemContentItem? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDone.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDone.g.cs new file mode 100644 index 00000000..d124824e --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDone.g.cs @@ -0,0 +1,110 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when an Item is done streaming. Also emitted when a Response is interrupted, incomplete, or cancelled. + /// + public sealed partial class RealtimeServerEventResponseOutputItemDone + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "response.output_item.done". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the response to which the item belongs. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The completed item. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("item")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeServerEventResponseOutputItemDoneItem Item { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseOutputItemDone? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseOutputItemDone), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseOutputItemDone; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseOutputItemDone? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDoneItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDoneItem.g.cs new file mode 100644 index 00000000..bbc16c78 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDoneItem.g.cs @@ -0,0 +1,111 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// The completed item. + /// + public sealed partial class RealtimeServerEventResponseOutputItemDoneItem + { + /// + /// The unique ID of the item. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + public string? Id { get; set; } + + /// + /// The object type, must be "realtime.item". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + public string? Object { get; set; } + + /// + /// The type of the item ("message", "function_call", "function_call_output"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The final status of the item ("completed", "incomplete"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("status")] + public string? Status { get; set; } + + /// + /// The role associated with the item ("assistant"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("role")] + public string? Role { get; set; } + + /// + /// The content of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content")] + public global::System.Collections.Generic.IList? Content { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseOutputItemDoneItem? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseOutputItemDoneItem), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseOutputItemDoneItem; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseOutputItemDoneItem? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDoneItemContentItem.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDoneItemContentItem.g.cs new file mode 100644 index 00000000..be065870 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseOutputItemDoneItemContentItem.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public sealed partial class RealtimeServerEventResponseOutputItemDoneItemContentItem + { + /// + /// The content type ("text", "audio"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The text content. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("text")] + public string? Text { get; set; } + + /// + /// Base64-encoded audio data. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public string? Audio { get; set; } + + /// + /// The transcript of the audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + public string? Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseOutputItemDoneItemContentItem? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseOutputItemDoneItemContentItem), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseOutputItemDoneItemContentItem; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseOutputItemDoneItemContentItem? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseTextDelta.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseTextDelta.g.cs new file mode 100644 index 00000000..ba68a9ed --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseTextDelta.g.cs @@ -0,0 +1,124 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when the text value of a "text" content part is updated. + /// + public sealed partial class RealtimeServerEventResponseTextDelta + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "response.text.delta". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the response. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The text delta. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("delta")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Delta { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseTextDelta? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseTextDelta), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseTextDelta; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseTextDelta? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseTextDone.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseTextDone.g.cs new file mode 100644 index 00000000..345b17ad --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventResponseTextDone.g.cs @@ -0,0 +1,124 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when the text value of a "text" content part is done streaming. Also emitted when a Response is interrupted, incomplete, or cancelled. + /// + public sealed partial class RealtimeServerEventResponseTextDone + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "response.text.done". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The final text content. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("text")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Text { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventResponseTextDone? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventResponseTextDone), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventResponseTextDone; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventResponseTextDone? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreated.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreated.g.cs new file mode 100644 index 00000000..4c57bdda --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreated.g.cs @@ -0,0 +1,96 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when a session is created. Emitted automatically when a new connection is established. + /// + public sealed partial class RealtimeServerEventSessionCreated + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "session.created". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The session resource. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("session")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeServerEventSessionCreatedSession Session { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventSessionCreated? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventSessionCreated), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionCreated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventSessionCreated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSession.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSession.g.cs new file mode 100644 index 00000000..bb9920a0 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSession.g.cs @@ -0,0 +1,162 @@ + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace OpenAI +{ + /// + /// The session resource. + /// + public sealed partial class RealtimeServerEventSessionCreatedSession + { + /// + /// The unique ID of the session. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + public string? Id { get; set; } + + /// + /// The object type, must be "realtime.session". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + public string? Object { get; set; } + + /// + /// The default model used for this session. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("model")] + public string? Model { get; set; } + + /// + /// The set of modalities the model can respond with. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] + public global::System.Collections.Generic.IList? 
Modalities { get; set; } + + /// + /// The default system instructions. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("instructions")] + public string? Instructions { get; set; } + + /// + /// The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("voice")] + public string? Voice { get; set; } + + /// + /// The format of input audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_format")] + public string? InputAudioFormat { get; set; } + + /// + /// The format of output audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_audio_format")] + public string? OutputAudioFormat { get; set; } + + /// + /// Configuration for input audio transcription. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_transcription")] + public global::OpenAI.RealtimeServerEventSessionCreatedSessionInputAudioTranscription? InputAudioTranscription { get; set; } + + /// + /// Configuration for turn detection. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("turn_detection")] + public global::OpenAI.RealtimeServerEventSessionCreatedSessionTurnDetection? TurnDetection { get; set; } + + /// + /// Tools (functions) available to the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("tools")] + public global::System.Collections.Generic.IList? Tools { get; set; } + + /// + /// How the model chooses tools. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("tool_choice")] + public string? ToolChoice { get; set; } + + /// + /// Sampling temperature. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("temperature")] + public double? Temperature { get; set; } + + /// + /// Maximum number of output tokens. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_output_tokens")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory2))] + public global::OpenAI.OneOf? MaxOutputTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventSessionCreatedSession? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventSessionCreatedSession), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionCreatedSession; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventSessionCreatedSession? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionInputAudioTranscription.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionInputAudioTranscription.g.cs new file mode 100644 index 00000000..7ef7d82b --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionInputAudioTranscription.g.cs @@ -0,0 +1,87 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Configuration for input audio transcription. 
+ /// + public sealed partial class RealtimeServerEventSessionCreatedSessionInputAudioTranscription + { + /// + /// Whether input audio transcription is enabled. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("enabled")] + public bool? Enabled { get; set; } + + /// + /// The model used for transcription. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventSessionCreatedSessionInputAudioTranscription? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventSessionCreatedSessionInputAudioTranscription), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionCreatedSessionInputAudioTranscription; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventSessionCreatedSessionInputAudioTranscription? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionMaxOutputTokens.g.cs new file mode 100644 index 00000000..0c67b699 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionMaxOutputTokens.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum RealtimeServerEventSessionCreatedSessionMaxOutputTokens + { + /// + /// + /// + Inf, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventSessionCreatedSessionMaxOutputTokensExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventSessionCreatedSessionMaxOutputTokens value) + { + return value switch + { + RealtimeServerEventSessionCreatedSessionMaxOutputTokens.Inf => "inf", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventSessionCreatedSessionMaxOutputTokens? 
ToEnum(string value) + { + return value switch + { + "inf" => RealtimeServerEventSessionCreatedSessionMaxOutputTokens.Inf, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionTool.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionTool.g.cs new file mode 100644 index 00000000..f21e87d8 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionTool.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public sealed partial class RealtimeServerEventSessionCreatedSessionTool + { + /// + /// The type of the tool. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The name of the function. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("name")] + public string? Name { get; set; } + + /// + /// The description of the function. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("description")] + public string? Description { get; set; } + + /// + /// Parameters of the function in JSON Schema. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("parameters")] + public object? Parameters { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventSessionCreatedSessionTool? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventSessionCreatedSessionTool), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionCreatedSessionTool; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventSessionCreatedSessionTool? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionToolParameters.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionToolParameters.g.cs new file mode 100644 index 00000000..7e058a80 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionToolParameters.g.cs @@ -0,0 +1,76 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Parameters of the function in JSON Schema. + /// + public sealed partial class RealtimeServerEventSessionCreatedSessionToolParameters + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventSessionCreatedSessionToolParameters? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventSessionCreatedSessionToolParameters), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionCreatedSessionToolParameters; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventSessionCreatedSessionToolParameters? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionTurnDetection.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionTurnDetection.g.cs new file mode 100644 index 00000000..f2e77640 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionCreatedSessionTurnDetection.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Configuration for turn detection. + /// + public sealed partial class RealtimeServerEventSessionCreatedSessionTurnDetection + { + /// + /// The type of turn detection ("server_vad" or "none"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// Activation threshold for VAD. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("threshold")] + public double? Threshold { get; set; } + + /// + /// Audio included before speech starts (in milliseconds). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("prefix_padding_ms")] + public int? PrefixPaddingMs { get; set; } + + /// + /// Duration of silence to detect speech stop (in milliseconds). 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("silence_duration_ms")] + public int? SilenceDurationMs { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventSessionCreatedSessionTurnDetection? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventSessionCreatedSessionTurnDetection), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionCreatedSessionTurnDetection; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventSessionCreatedSessionTurnDetection? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdated.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdated.g.cs new file mode 100644 index 00000000..9c0a54a9 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdated.g.cs @@ -0,0 +1,96 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Returned when a session is updated. + /// + public sealed partial class RealtimeServerEventSessionUpdated + { + /// + /// The unique ID of the server event. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be "session.updated". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Type { get; set; } + + /// + /// The updated session resource. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("session")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::OpenAI.RealtimeServerEventSessionUpdatedSession Session { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventSessionUpdated? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventSessionUpdated), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionUpdated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventSessionUpdated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSession.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSession.g.cs new file mode 100644 index 00000000..798638fe --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSession.g.cs @@ -0,0 +1,162 @@ + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace OpenAI +{ + /// + /// The updated session resource. + /// + public sealed partial class RealtimeServerEventSessionUpdatedSession + { + /// + /// The unique ID of the session. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + public string? Id { get; set; } + + /// + /// The object type, must be "realtime.session". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + public string? Object { get; set; } + + /// + /// The default model used for this session. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("model")] + public string? Model { get; set; } + + /// + /// The set of modalities the model can respond with. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] + public global::System.Collections.Generic.IList? Modalities { get; set; } + + /// + /// The default system instructions. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("instructions")] + public string? Instructions { get; set; } + + /// + /// The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("voice")] + public string? Voice { get; set; } + + /// + /// The format of input audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_format")] + public string? 
InputAudioFormat { get; set; } + + /// + /// The format of output audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_audio_format")] + public string? OutputAudioFormat { get; set; } + + /// + /// Configuration for input audio transcription. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_transcription")] + public global::OpenAI.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription? InputAudioTranscription { get; set; } + + /// + /// Configuration for turn detection. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("turn_detection")] + public global::OpenAI.RealtimeServerEventSessionUpdatedSessionTurnDetection? TurnDetection { get; set; } + + /// + /// Tools (functions) available to the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("tools")] + public global::System.Collections.Generic.IList? Tools { get; set; } + + /// + /// How the model chooses tools. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("tool_choice")] + public string? ToolChoice { get; set; } + + /// + /// Sampling temperature. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("temperature")] + public double? Temperature { get; set; } + + /// + /// Maximum number of output tokens. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_output_tokens")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::OpenAI.JsonConverters.OneOfJsonConverterFactory2))] + public global::OpenAI.OneOf? MaxOutputTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventSessionUpdatedSession? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventSessionUpdatedSession), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionUpdatedSession; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventSessionUpdatedSession? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription.g.cs new file mode 100644 index 00000000..d6183673 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription.g.cs @@ -0,0 +1,87 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Configuration for input audio transcription. + /// + public sealed partial class RealtimeServerEventSessionUpdatedSessionInputAudioTranscription + { + /// + /// Whether input audio transcription is enabled. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("enabled")] + public bool? Enabled { get; set; } + + /// + /// The model used for transcription. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("model")] + public string? 
Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionInputAudioTranscription? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
 jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.g.cs new file mode 100644 index 00000000..8736863d --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.g.cs @@ -0,0 +1,45 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public enum RealtimeServerEventSessionUpdatedSessionMaxOutputTokens + { + /// + /// + /// + Inf, + } + + /// + /// Enum extensions to do fast conversions without reflection. + /// + public static class RealtimeServerEventSessionUpdatedSessionMaxOutputTokensExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventSessionUpdatedSessionMaxOutputTokens value) + { + return value switch + { + RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.Inf => "inf", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts a string to an enum. + /// + public static RealtimeServerEventSessionUpdatedSessionMaxOutputTokens?
ToEnum(string value) + { + return value switch + { + "inf" => RealtimeServerEventSessionUpdatedSessionMaxOutputTokens.Inf, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionTool.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionTool.g.cs new file mode 100644 index 00000000..d16bfaa8 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionTool.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// + /// + public sealed partial class RealtimeServerEventSessionUpdatedSessionTool + { + /// + /// The type of the tool. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The name of the function. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("name")] + public string? Name { get; set; } + + /// + /// The description of the function. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("description")] + public string? Description { get; set; } + + /// + /// Parameters of the function in JSON Schema. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("parameters")] + public object? Parameters { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionTool? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventSessionUpdatedSessionTool), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionUpdatedSessionTool; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionTool? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionToolParameters.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionToolParameters.g.cs new file mode 100644 index 00000000..ec0888bc --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionToolParameters.g.cs @@ -0,0 +1,76 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Parameters of the function in JSON Schema. + /// + public sealed partial class RealtimeServerEventSessionUpdatedSessionToolParameters + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionToolParameters? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventSessionUpdatedSessionToolParameters), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionUpdatedSessionToolParameters; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionToolParameters? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionTurnDetection.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionTurnDetection.g.cs new file mode 100644 index 00000000..3c150aa8 --- /dev/null +++ b/src/libs/OpenAI/Generated/OpenAI.Models.RealtimeServerEventSessionUpdatedSessionTurnDetection.g.cs @@ -0,0 +1,99 @@ + +#nullable enable + +namespace OpenAI +{ + /// + /// Configuration for turn detection. + /// + public sealed partial class RealtimeServerEventSessionUpdatedSessionTurnDetection + { + /// + /// The type of turn detection ("server_vad" or "none"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// Activation threshold for VAD. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("threshold")] + public double? Threshold { get; set; } + + /// + /// Audio included before speech starts (in milliseconds). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("prefix_padding_ms")] + public int? PrefixPaddingMs { get; set; } + + /// + /// Duration of silence to detect speech stop (in milliseconds). 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("silence_duration_ms")] + public int? SilenceDurationMs { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionTurnDetection? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::OpenAI.RealtimeServerEventSessionUpdatedSessionTurnDetection), + jsonSerializerContext) as global::OpenAI.RealtimeServerEventSessionUpdatedSessionTurnDetection; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::OpenAI.RealtimeServerEventSessionUpdatedSessionTurnDetection? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + } +} \ No newline at end of file diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.ThreadStreamEventVariant1.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.ThreadStreamEventVariant1.g.cs index 6898f958..4025b394 100644 --- a/src/libs/OpenAI/Generated/OpenAI.Models.ThreadStreamEventVariant1.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.Models.ThreadStreamEventVariant1.g.cs @@ -8,6 +8,12 @@ namespace OpenAI /// public sealed partial class ThreadStreamEventVariant1 { + /// + /// Whether to enable input audio transcription. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("enabled")] + public bool? 
Enabled { get; set; } + /// /// /// diff --git a/src/libs/OpenAI/Generated/OpenAI.ModelsClient.RetrieveModel.g.cs b/src/libs/OpenAI/Generated/OpenAI.ModelsClient.RetrieveModel.g.cs index d026fd1d..19072afb 100644 --- a/src/libs/OpenAI/Generated/OpenAI.ModelsClient.RetrieveModel.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.ModelsClient.RetrieveModel.g.cs @@ -29,7 +29,7 @@ partial void ProcessRetrieveModelResponseContent( /// /// The token to cancel the operation with /// - public async global::System.Threading.Tasks.Task RetrieveModelAsync( + public async global::System.Threading.Tasks.Task RetrieveModelAsync( string model, global::System.Threading.CancellationToken cancellationToken = default) { @@ -104,7 +104,7 @@ partial void ProcessRetrieveModelResponseContent( } return - global::OpenAI.Model12.FromJson(__content, JsonSerializerContext) ?? + global::OpenAI.Model15.FromJson(__content, JsonSerializerContext) ?? throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); } } diff --git a/src/libs/OpenAI/Generated/OpenAI.OpenAiApi.g.cs b/src/libs/OpenAI/Generated/OpenAI.OpenAiApi.g.cs index 570fe9d2..070d29d3 100644 --- a/src/libs/OpenAI/Generated/OpenAI.OpenAiApi.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.OpenAiApi.g.cs @@ -131,7 +131,7 @@ public sealed partial class OpenAiApi : global::OpenAI.IOpenAiApi, global::Syste /// /// /// - public VectorStoresClient VectorStores => new VectorStoresClient(_httpClient, authorizations: _authorizations) + public InvitesClient Invites => new InvitesClient(_httpClient, authorizations: _authorizations) { JsonSerializerContext = JsonSerializerContext, }; @@ -139,7 +139,7 @@ public sealed partial class OpenAiApi : global::OpenAI.IOpenAiApi, global::Syste /// /// /// - public InvitesClient Invites => new InvitesClient(_httpClient, authorizations: _authorizations) + public ProjectsClient Projects => new ProjectsClient(_httpClient, authorizations: _authorizations) { 
JsonSerializerContext = JsonSerializerContext, }; @@ -155,7 +155,7 @@ public sealed partial class OpenAiApi : global::OpenAI.IOpenAiApi, global::Syste /// /// /// - public ProjectsClient Projects => new ProjectsClient(_httpClient, authorizations: _authorizations) + public VectorStoresClient VectorStores => new VectorStoresClient(_httpClient, authorizations: _authorizations) { JsonSerializerContext = JsonSerializerContext, }; diff --git a/src/libs/OpenAI/Generated/OpenAI.ProjectsClient.ModifyProject.g.cs b/src/libs/OpenAI/Generated/OpenAI.ProjectsClient.ModifyProject.g.cs index 0a517834..61a6f184 100644 --- a/src/libs/OpenAI/Generated/OpenAI.ProjectsClient.ModifyProject.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.ProjectsClient.ModifyProject.g.cs @@ -7,10 +7,12 @@ public partial class ProjectsClient { partial void PrepareModifyProjectArguments( global::System.Net.Http.HttpClient httpClient, + ref string projectId, global::OpenAI.ProjectUpdateRequest request); partial void PrepareModifyProjectRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string projectId, global::OpenAI.ProjectUpdateRequest request); partial void ProcessModifyProjectResponse( global::System.Net.Http.HttpClient httpClient, @@ -24,10 +26,12 @@ partial void ProcessModifyProjectResponseContent( /// /// Modifies a project in the organization. 
/// + /// /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyProjectAsync( + string projectId, global::OpenAI.ProjectUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default) { @@ -37,10 +41,11 @@ partial void ProcessModifyProjectResponseContent( client: _httpClient); PrepareModifyProjectArguments( httpClient: _httpClient, + projectId: ref projectId, request: request); var __pathBuilder = new PathBuilder( - path: "/organization/projects/{project_id}", + path: $"/organization/projects/{projectId}", baseUri: _httpClient.BaseAddress); var __path = __pathBuilder.ToString(); using var httpRequest = new global::System.Net.Http.HttpRequestMessage( @@ -75,6 +80,7 @@ partial void ProcessModifyProjectResponseContent( PrepareModifyProjectRequest( httpClient: _httpClient, httpRequestMessage: httpRequest, + projectId: projectId, request: request); using var response = await _httpClient.SendAsync( @@ -117,12 +123,14 @@ partial void ProcessModifyProjectResponseContent( /// /// Modifies a project in the organization. /// + /// /// /// The updated name of the project, this name appears in reports. 
/// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyProjectAsync( + string projectId, string name, global::System.Threading.CancellationToken cancellationToken = default) { @@ -132,6 +140,7 @@ partial void ProcessModifyProjectResponseContent( }; return await ModifyProjectAsync( + projectId: projectId, request: request, cancellationToken: cancellationToken).ConfigureAwait(false); } diff --git a/src/libs/OpenAI/Generated/OpenAI.ProjectsClient.ModifyProjectUser.g.cs b/src/libs/OpenAI/Generated/OpenAI.ProjectsClient.ModifyProjectUser.g.cs index c5001c1c..81bfba83 100644 --- a/src/libs/OpenAI/Generated/OpenAI.ProjectsClient.ModifyProjectUser.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.ProjectsClient.ModifyProjectUser.g.cs @@ -7,10 +7,14 @@ public partial class ProjectsClient { partial void PrepareModifyProjectUserArguments( global::System.Net.Http.HttpClient httpClient, + ref string projectId, + ref string userId, global::OpenAI.ProjectUserUpdateRequest request); partial void PrepareModifyProjectUserRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string projectId, + string userId, global::OpenAI.ProjectUserUpdateRequest request); partial void ProcessModifyProjectUserResponse( global::System.Net.Http.HttpClient httpClient, @@ -24,10 +28,14 @@ partial void ProcessModifyProjectUserResponseContent( /// /// Modifies a user's role in the project. 
/// + /// + /// /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyProjectUserAsync( + string projectId, + string userId, global::OpenAI.ProjectUserUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default) { @@ -37,10 +45,12 @@ partial void ProcessModifyProjectUserResponseContent( client: _httpClient); PrepareModifyProjectUserArguments( httpClient: _httpClient, + projectId: ref projectId, + userId: ref userId, request: request); var __pathBuilder = new PathBuilder( - path: "/organization/projects/{project_id}/users/{user_id}", + path: $"/organization/projects/{projectId}/users/{userId}", baseUri: _httpClient.BaseAddress); var __path = __pathBuilder.ToString(); using var httpRequest = new global::System.Net.Http.HttpRequestMessage( @@ -75,6 +85,8 @@ partial void ProcessModifyProjectUserResponseContent( PrepareModifyProjectUserRequest( httpClient: _httpClient, httpRequestMessage: httpRequest, + projectId: projectId, + userId: userId, request: request); using var response = await _httpClient.SendAsync( @@ -117,12 +129,16 @@ partial void ProcessModifyProjectUserResponseContent( /// /// Modifies a user's role in the project. 
/// + /// + /// /// /// `owner` or `member` /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyProjectUserAsync( + string projectId, + string userId, global::OpenAI.ProjectUserUpdateRequestRole role, global::System.Threading.CancellationToken cancellationToken = default) { @@ -132,6 +148,8 @@ partial void ProcessModifyProjectUserResponseContent( }; return await ModifyProjectUserAsync( + projectId: projectId, + userId: userId, request: request, cancellationToken: cancellationToken).ConfigureAwait(false); } diff --git a/src/libs/OpenAI/Generated/OpenAI.UsersClient.ModifyUser.g.cs b/src/libs/OpenAI/Generated/OpenAI.UsersClient.ModifyUser.g.cs index e4e6b543..0d201c9c 100644 --- a/src/libs/OpenAI/Generated/OpenAI.UsersClient.ModifyUser.g.cs +++ b/src/libs/OpenAI/Generated/OpenAI.UsersClient.ModifyUser.g.cs @@ -7,10 +7,12 @@ public partial class UsersClient { partial void PrepareModifyUserArguments( global::System.Net.Http.HttpClient httpClient, + ref string userId, global::OpenAI.UserRoleUpdateRequest request); partial void PrepareModifyUserRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string userId, global::OpenAI.UserRoleUpdateRequest request); partial void ProcessModifyUserResponse( global::System.Net.Http.HttpClient httpClient, @@ -24,10 +26,12 @@ partial void ProcessModifyUserResponseContent( /// /// Modifies a user's role in the organization. 
/// + /// /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyUserAsync( + string userId, global::OpenAI.UserRoleUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default) { @@ -37,10 +41,11 @@ partial void ProcessModifyUserResponseContent( client: _httpClient); PrepareModifyUserArguments( httpClient: _httpClient, + userId: ref userId, request: request); var __pathBuilder = new PathBuilder( - path: "/organization/users/{user_id}", + path: $"/organization/users/{userId}", baseUri: _httpClient.BaseAddress); var __path = __pathBuilder.ToString(); using var httpRequest = new global::System.Net.Http.HttpRequestMessage( @@ -75,6 +80,7 @@ partial void ProcessModifyUserResponseContent( PrepareModifyUserRequest( httpClient: _httpClient, httpRequestMessage: httpRequest, + userId: userId, request: request); using var response = await _httpClient.SendAsync( @@ -117,12 +123,14 @@ partial void ProcessModifyUserResponseContent( /// /// Modifies a user's role in the organization. /// + /// /// /// `owner` or `reader` /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyUserAsync( + string userId, global::OpenAI.UserRoleUpdateRequestRole role, global::System.Threading.CancellationToken cancellationToken = default) { @@ -132,6 +140,7 @@ partial void ProcessModifyUserResponseContent( }; return await ModifyUserAsync( + userId: userId, request: request, cancellationToken: cancellationToken).ConfigureAwait(false); } diff --git a/src/libs/OpenAI/openapi.yaml b/src/libs/OpenAI/openapi.yaml index 07a741d3..6dc8ca21 100644 --- a/src/libs/OpenAI/openapi.yaml +++ b/src/libs/OpenAI/openapi.yaml @@ -13,72 +13,66 @@ info: servers: - url: https://api.openai.com/v1 paths: - /chat/completions: - post: + /assistants: + get: tags: - - Chat - summary: Creates a model response for the given chat conversation. 
- operationId: createChatCompletion - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateChatCompletionRequest' - required: true + - Assistants + summary: Returns a list of assistants. + operationId: listAssistants + parameters: + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + schema: + type: integer + default: 20 + - name: order + in: query + description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" + schema: + enum: + - asc + - desc + type: string + default: desc + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + schema: + type: string + - name: before + in: query + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + schema: + type: string responses: '200': description: OK content: application/json: schema: - $ref: '#/components/schemas/CreateChatCompletionResponse' + $ref: '#/components/schemas/ListAssistantsResponse' x-oaiMeta: - name: Create chat completion - group: chat - returns: "Returns a [chat completion](/docs/api-reference/chat/object) object, or a streamed sequence of [chat completion chunk](/docs/api-reference/chat/streaming) objects if the request is streamed.\n" - path: create + name: List assistants + group: assistants + beta: true + returns: 'A list of [assistant](/docs/api-reference/assistants/object) objects.' 
examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ]\n)\n\nprint(completion.choices[0].message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"system\", content: \"You are a helpful assistant.\" }],\n model: \"VAR_model_id\",\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" - response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nHello there, how may I assist you today?\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" - - title: Image input - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"What'\\''s in this image?\"\n },\n {\n \"type\": \"image_url\",\n \"image_url\": 
{\n \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n }\n }\n ]\n }\n ],\n \"max_tokens\": 300\n }'\n" - python: "from openai import OpenAI\n\nclient = OpenAI()\n\nresponse = client.chat.completions.create(\n model=\"gpt-4o\",\n messages=[\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": \"What's in this image?\"},\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n }\n },\n ],\n }\n ],\n max_tokens=300,\n)\n\nprint(response.choices[0])\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.chat.completions.create({\n model: \"gpt-4o\",\n messages: [\n {\n role: \"user\",\n content: [\n { type: \"text\", text: \"What's in this image?\" },\n {\n type: \"image_url\",\n image_url: {\n \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n },\n }\n ],\n },\n ],\n });\n console.log(response.choices[0]);\n}\nmain();" - response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" - - title: Streaming - request: - curl: "curl 
https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream=True\n)\n\nfor chunk in completion:\n print(chunk.choices[0].delta)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n model: \"VAR_model_id\",\n messages: [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream: true,\n });\n\n for await (const chunk of completion) {\n console.log(chunk.choices[0].delta.content);\n }\n}\n\nmain();" - response: "{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"role\":\"assistant\",\"content\":\"\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n....\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{},\"logprobs\":null,\"finish_reason\":\"stop\"}]}\n" - - title: 
Functions - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n-H \"Content-Type: application/json\" \\\n-H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n-d '{\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"What'\\''s the weather like in Boston today?\"\n }\n ],\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"tool_choice\": \"auto\"\n}'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\nmessages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}]\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=messages,\n tools=tools,\n tool_choice=\"auto\"\n)\n\nprint(completion)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const messages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}];\n const tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n ];\n\n const response = await openai.chat.completions.create({\n model: \"gpt-4o\",\n messages: messages,\n tools: tools,\n tool_choice: \"auto\",\n });\n\n console.log(response);\n}\n\nmain();" - response: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" - - title: Logprobs - 
request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"logprobs\": true,\n \"top_logprobs\": 2\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n logprobs=True,\n top_logprobs=2\n)\n\nprint(completion.choices[0].message)\nprint(completion.choices[0].logprobs)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"user\", content: \"Hello!\" }],\n model: \"VAR_model_id\",\n logprobs: true,\n top_logprobs: 2,\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" - response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1702685778,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hello! 
How can I assist you today?\"\n },\n \"logprobs\": {\n \"content\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111],\n \"top_logprobs\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111]\n },\n {\n \"token\": \"Hi\",\n \"logprob\": -1.3190403,\n \"bytes\": [72, 105]\n }\n ]\n },\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [\n 33\n ],\n \"top_logprobs\": [\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [33]\n },\n {\n \"token\": \" there\",\n \"logprob\": -3.787621,\n \"bytes\": [32, 116, 104, 101, 114, 101]\n }\n ]\n },\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119],\n \"top_logprobs\": [\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119]\n },\n {\n \"token\": \"<|end|>\",\n \"logprob\": -10.953937,\n \"bytes\": null\n }\n ]\n },\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110],\n \"top_logprobs\": [\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110]\n },\n {\n \"token\": \" may\",\n \"logprob\": -4.161023,\n \"bytes\": [32, 109, 97, 121]\n }\n ]\n },\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [\n 32,\n 73\n ],\n \"top_logprobs\": [\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [32, 73]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -13.596657,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n }\n ]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116],\n \"top_logprobs\": [\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n },\n {\n \"token\": \" help\",\n \"logprob\": -3.1089056,\n \"bytes\": [32, 104, 101, 108, 112]\n }\n ]\n },\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 
117],\n \"top_logprobs\": [\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 117]\n },\n {\n \"token\": \" today\",\n \"logprob\": -12.807695,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n }\n ]\n },\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121],\n \"top_logprobs\": [\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n },\n {\n \"token\": \"?\",\n \"logprob\": -5.5247097,\n \"bytes\": [63]\n }\n ]\n },\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63],\n \"top_logprobs\": [\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63]\n },\n {\n \"token\": \"?\\n\",\n \"logprob\": -7.184561,\n \"bytes\": [63, 10]\n }\n ]\n }\n ]\n },\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 9,\n \"total_tokens\": 18,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": null\n}\n" - /completions: + request: + curl: "curl \"https://api.openai.com/v1/assistants?order=desc&limit=20\" \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistants = client.beta.assistants.list(\n order=\"desc\",\n limit=\"20\",\n)\nprint(my_assistants.data)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistants = await openai.beta.assistants.list({\n order: \"desc\",\n limit: \"20\",\n });\n\n console.log(myAssistants.data);\n}\n\nmain();" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698982736,\n \"name\": \"Coding Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to 
make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc456\",\n \"object\": \"assistant\",\n \"created_at\": 1698982718,\n \"name\": \"My Assistant\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc789\",\n \"object\": \"assistant\",\n \"created_at\": 1698982643,\n \"name\": null,\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n }\n ],\n \"first_id\": \"asst_abc123\",\n \"last_id\": \"asst_abc789\",\n \"has_more\": false\n}\n" post: tags: - - Completions - summary: Creates a completion for the provided prompt and parameters. - operationId: createCompletion + - Assistants + summary: Create an assistant with a model and instructions. + operationId: createAssistant requestBody: content: application/json: schema: - $ref: '#/components/schemas/CreateCompletionRequest' + $ref: '#/components/schemas/CreateAssistantRequest' required: true responses: '200': @@ -86,65 +80,73 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CreateCompletionResponse' + $ref: '#/components/schemas/AssistantObject' x-oaiMeta: - name: Create completion - group: completions - returns: "Returns a [completion](/docs/api-reference/completions/object) object, or a sequence of completion objects if the request is streamed.\n" - legacy: true + name: Create assistant + group: assistants + beta: true + returns: 'An [assistant](/docs/api-reference/assistants/object) object.' 
examples: - - title: No streaming + - title: Code Interpreter request: - curl: "curl https://api.openai.com/v1/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 7,\n \"temperature\": 0\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.completions.create(\n model=\"VAR_model_id\",\n prompt=\"Say this is a test\",\n max_tokens=7,\n temperature=0\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.completions.create({\n model: \"VAR_model_id\",\n prompt: \"Say this is a test.\",\n max_tokens: 7,\n temperature: 0,\n });\n\n console.log(completion);\n}\nmain();" - response: "{\n \"id\": \"cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7\",\n \"object\": \"text_completion\",\n \"created\": 1589478378,\n \"model\": \"VAR_model_id\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [\n {\n \"text\": \"\\n\\nThis is indeed a test\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": \"length\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 5,\n \"completion_tokens\": 7,\n \"total_tokens\": 12\n }\n}\n" - - title: Streaming + curl: "curl \"https://api.openai.com/v1/assistants\" \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n \"name\": \"Math Tutor\",\n \"tools\": [{\"type\": \"code_interpreter\"}],\n \"model\": \"gpt-4o\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.create(\n instructions=\"You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.\",\n name=\"Math Tutor\",\n tools=[{\"type\": \"code_interpreter\"}],\n model=\"gpt-4o\",\n)\nprint(my_assistant)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.create({\n instructions:\n \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n name: \"Math Tutor\",\n tools: [{ type: \"code_interpreter\" }],\n model: \"gpt-4o\",\n });\n\n console.log(myAssistant);\n}\n\nmain();" + response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698984975,\n \"name\": \"Math Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" + - title: Files request: - curl: "curl https://api.openai.com/v1/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 7,\n \"temperature\": 0,\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nfor chunk in client.completions.create(\n model=\"VAR_model_id\",\n prompt=\"Say this is a test\",\n max_tokens=7,\n temperature=0,\n stream=True\n):\n print(chunk.choices[0].text)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.completions.create({\n model: \"VAR_model_id\",\n prompt: \"Say this is a test.\",\n stream: true,\n });\n\n for await (const chunk of stream) {\n console.log(chunk.choices[0].text)\n }\n}\nmain();" - response: "{\n 
\"id\": \"cmpl-7iA7iJjj8V2zOkCGvWF2hAkDWBQZe\",\n \"object\": \"text_completion\",\n \"created\": 1690759702,\n \"choices\": [\n {\n \"text\": \"This\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": null\n }\n ],\n \"model\": \"gpt-3.5-turbo-instruct\"\n \"system_fingerprint\": \"fp_44709d6fcb\",\n}\n" - /images/generations: - post: + curl: "curl https://api.openai.com/v1/assistants \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [{\"type\": \"file_search\"}],\n \"tool_resources\": {\"file_search\": {\"vector_store_ids\": [\"vs_123\"]}},\n \"model\": \"gpt-4o\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.create(\n instructions=\"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n name=\"HR Helper\",\n tools=[{\"type\": \"file_search\"}],\n tool_resources={\"file_search\": {\"vector_store_ids\": [\"vs_123\"]}},\n model=\"gpt-4o\"\n)\nprint(my_assistant)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.create({\n instructions:\n \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n name: \"HR Helper\",\n tools: [{ type: \"file_search\" }],\n tool_resources: {\n file_search: {\n vector_store_ids: [\"vs_123\"]\n }\n },\n model: \"gpt-4o\"\n });\n\n console.log(myAssistant);\n}\n\nmain();" + response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009403,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee 
questions about company policies.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": [\"vs_123\"]\n }\n },\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" + '/assistants/{assistant_id}': + get: tags: - - Images - summary: Creates an image given a prompt. - operationId: createImage - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateImageRequest' - required: true + - Assistants + summary: Retrieves an assistant. + operationId: getAssistant + parameters: + - name: assistant_id + in: path + description: The ID of the assistant to retrieve. + required: true + schema: + type: string responses: '200': description: OK content: application/json: schema: - $ref: '#/components/schemas/ImagesResponse' + $ref: '#/components/schemas/AssistantObject' x-oaiMeta: - name: Create image - group: images - returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' + name: Retrieve assistant + group: assistants + beta: true + returns: 'The [assistant](/docs/api-reference/assistants/object) object matching the specified ID.' 
examples: request: - curl: "curl https://api.openai.com/v1/images/generations \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"dall-e-3\",\n \"prompt\": \"A cute baby sea otter\",\n \"n\": 1,\n \"size\": \"1024x1024\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.images.generate(\n model=\"dall-e-3\",\n prompt=\"A cute baby sea otter\",\n n=1,\n size=\"1024x1024\"\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.generate({ model: \"dall-e-3\", prompt: \"A cute baby sea otter\" });\n\n console.log(image.data);\n}\nmain();" - response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" - /images/edits: + curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.retrieve(\"asst_abc123\")\nprint(my_assistant)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.retrieve(\n \"asst_abc123\"\n );\n\n console.log(myAssistant);\n}\n\nmain();" + response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009709,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" post: tags: - - Images - summary: Creates an edited or extended image given an original 
image and a prompt. - operationId: createImageEdit + - Assistants + summary: Modifies an assistant. + operationId: modifyAssistant + parameters: + - name: assistant_id + in: path + description: The ID of the assistant to modify. + required: true + schema: + type: string requestBody: content: - multipart/form-data: + application/json: schema: - $ref: '#/components/schemas/CreateImageEditRequest' + $ref: '#/components/schemas/ModifyAssistantRequest' required: true responses: '200': @@ -152,97 +154,70 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/ImagesResponse' + $ref: '#/components/schemas/AssistantObject' x-oaiMeta: - name: Create image edit - group: images - returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' + name: Modify assistant + group: assistants + beta: true + returns: 'The modified [assistant](/docs/api-reference/assistants/object) object.' examples: request: - curl: "curl https://api.openai.com/v1/images/edits \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F image=\"@otter.png\" \\\n -F mask=\"@mask.png\" \\\n -F prompt=\"A cute baby sea otter wearing a beret\" \\\n -F n=2 \\\n -F size=\"1024x1024\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.images.edit(\n image=open(\"otter.png\", \"rb\"),\n mask=open(\"mask.png\", \"rb\"),\n prompt=\"A cute baby sea otter wearing a beret\",\n n=2,\n size=\"1024x1024\"\n)\n" - node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.edit({\n image: fs.createReadStream(\"otter.png\"),\n mask: fs.createReadStream(\"mask.png\"),\n prompt: \"A cute baby sea otter wearing a beret\",\n });\n\n console.log(image.data);\n}\nmain();" - response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" - /images/variations: - post: + curl: "curl 
https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n \"tools\": [{\"type\": \"file_search\"}],\n \"model\": \"gpt-4o\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_updated_assistant = client.beta.assistants.update(\n \"asst_abc123\",\n instructions=\"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n name=\"HR Helper\",\n tools=[{\"type\": \"file_search\"}],\n model=\"gpt-4o\"\n)\n\nprint(my_updated_assistant)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myUpdatedAssistant = await openai.beta.assistants.update(\n \"asst_abc123\",\n {\n instructions:\n \"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n name: \"HR Helper\",\n tools: [{ type: \"file_search\" }],\n model: \"gpt-4o\"\n }\n );\n\n console.log(myUpdatedAssistant);\n}\n\nmain();" + response: "{\n \"id\": \"asst_123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009709,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": []\n }\n },\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" + delete: tags: - - Images - summary: Creates a variation of a given image. - operationId: createImageVariation - requestBody: - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateImageVariationRequest' - required: true + - Assistants + summary: Delete an assistant. + operationId: deleteAssistant + parameters: + - name: assistant_id + in: path + description: The ID of the assistant to delete. + required: true + schema: + type: string responses: '200': description: OK content: application/json: schema: - $ref: '#/components/schemas/ImagesResponse' + $ref: '#/components/schemas/DeleteAssistantResponse' x-oaiMeta: - name: Create image variation - group: images - returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' 
+ name: Delete assistant + group: assistants + beta: true + returns: Deletion status examples: request: - curl: "curl https://api.openai.com/v1/images/variations \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F image=\"@otter.png\" \\\n -F n=2 \\\n -F size=\"1024x1024\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.images.create_variation(\n image=open(\"image_edit_original.png\", \"rb\"),\n n=2,\n size=\"1024x1024\"\n)\n" - node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.createVariation({\n image: fs.createReadStream(\"otter.png\"),\n });\n\n console.log(image.data);\n}\nmain();" - response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" - /embeddings: + curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.beta.assistants.delete(\"asst_abc123\")\nprint(response)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.beta.assistants.del(\"asst_abc123\");\n\n console.log(response);\n}\nmain();" + response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant.deleted\",\n \"deleted\": true\n}\n" + /audio/speech: post: tags: - - Embeddings - summary: Creates an embedding vector representing the input text. - operationId: createEmbedding + - Audio + summary: Generates audio from the input text. 
+ operationId: createSpeech requestBody: content: application/json: schema: - $ref: '#/components/schemas/CreateEmbeddingRequest' + $ref: '#/components/schemas/CreateSpeechRequest' required: true responses: '200': description: OK + headers: + Transfer-Encoding: + description: chunked + schema: + type: string content: - application/json: - schema: - $ref: '#/components/schemas/CreateEmbeddingResponse' - x-oaiMeta: - name: Create embeddings - group: embeddings - returns: 'A list of [embedding](/docs/api-reference/embeddings/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/embeddings \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"input\": \"The food was delicious and the waiter...\",\n \"model\": \"text-embedding-ada-002\",\n \"encoding_format\": \"float\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.embeddings.create(\n model=\"text-embedding-ada-002\",\n input=\"The food was delicious and the waiter...\",\n encoding_format=\"float\"\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const embedding = await openai.embeddings.create({\n model: \"text-embedding-ada-002\",\n input: \"The quick brown fox jumped over the lazy dog\",\n encoding_format: \"float\",\n });\n\n console.log(embedding);\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"embedding\",\n \"embedding\": [\n 0.0023064255,\n -0.009327292,\n .... (1536 floats total for ada-002)\n -0.0028842222,\n ],\n \"index\": 0\n }\n ],\n \"model\": \"text-embedding-ada-002\",\n \"usage\": {\n \"prompt_tokens\": 8,\n \"total_tokens\": 8\n }\n}\n" - /audio/speech: - post: - tags: - - Audio - summary: Generates audio from the input text. 
- operationId: createSpeech - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateSpeechRequest' - required: true - responses: - '200': - description: OK - headers: - Transfer-Encoding: - description: chunked - schema: - type: string - content: - application/octet-stream: + application/octet-stream: schema: type: string format: binary @@ -330,163 +305,166 @@ paths: python: "from openai import OpenAI\nclient = OpenAI()\n\naudio_file = open(\"speech.mp3\", \"rb\")\ntranscript = client.audio.translations.create(\n model=\"whisper-1\",\n file=audio_file\n)\n" node: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const translation = await openai.audio.translations.create({\n file: fs.createReadStream(\"speech.mp3\"),\n model: \"whisper-1\",\n });\n\n console.log(translation.text);\n}\nmain();\n" response: "{\n \"text\": \"Hello, my name is Wolfgang and I come from Germany. Where are you heading today?\"\n}\n" - /files: - get: - tags: - - Files - summary: Returns a list of files that belong to the user's organization. - operationId: listFiles - parameters: - - name: purpose - in: query - description: Only return files with the given purpose. - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListFilesResponse' - x-oaiMeta: - name: List files - group: files - returns: 'A list of [File](/docs/api-reference/files/object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.list()\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.files.list();\n\n for await (const file of list) {\n console.log(file);\n }\n}\n\nmain();" - response: "{\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 175,\n \"created_at\": 1613677385,\n \"filename\": \"salesOverview.pdf\",\n \"purpose\": \"assistants\",\n },\n {\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 140,\n \"created_at\": 1613779121,\n \"filename\": \"puppy.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n ],\n \"object\": \"list\"\n}\n" + /batches: post: tags: - - Files - summary: "Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.\n\nThe Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details.\n\nThe Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models.\n\nThe Batch API only supports `.jsonl` files up to 100 MB in size. 
The input also has a specific required [format](/docs/api-reference/batch/request-input).\n\nPlease [contact us](https://help.openai.com/) if you need to increase these storage limits.\n" - operationId: createFile + - Batch + summary: Creates and executes a batch from an uploaded file of requests + operationId: createBatch requestBody: content: - multipart/form-data: + application/json: schema: - $ref: '#/components/schemas/CreateFileRequest' + required: + - input_file_id + - endpoint + - completion_window + type: object + properties: + input_file_id: + type: string + description: "The ID of an uploaded file that contains requests for the new batch.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size.\n" + endpoint: + enum: + - /v1/chat/completions + - /v1/embeddings + - /v1/completions + type: string + description: 'The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch.' + completion_window: + enum: + - 24h + type: string + description: The time frame within which the batch should be processed. Currently only `24h` is supported. + metadata: + type: object + additionalProperties: + type: string + description: Optional custom metadata for the batch. + nullable: true required: true responses: '200': - description: OK + description: Batch created successfully. content: application/json: schema: - $ref: '#/components/schemas/OpenAIFile' + $ref: '#/components/schemas/Batch' x-oaiMeta: - name: Upload file - group: files - returns: 'The uploaded [File](/docs/api-reference/files/object) object.' 
+ name: Create batch + group: batch + returns: 'The created [Batch](/docs/api-reference/batch/object) object.' examples: request: - curl: "curl https://api.openai.com/v1/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F purpose=\"fine-tune\" \\\n -F file=\"@mydata.jsonl\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.create(\n file=open(\"mydata.jsonl\", \"rb\"),\n purpose=\"fine-tune\"\n)\n" - node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.create({\n file: fs.createReadStream(\"mydata.jsonl\"),\n purpose: \"fine-tune\",\n });\n\n console.log(file);\n}\n\nmain();" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"mydata.jsonl\",\n \"purpose\": \"fine-tune\",\n}\n" - '/files/{file_id}': - delete: + curl: "curl https://api.openai.com/v1/batches \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"input_file_id\": \"file-abc123\",\n \"endpoint\": \"/v1/chat/completions\",\n \"completion_window\": \"24h\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.create(\n input_file_id=\"file-abc123\",\n endpoint=\"/v1/chat/completions\",\n completion_window=\"24h\"\n)\n" + node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.create({\n input_file_id: \"file-abc123\",\n endpoint: \"/v1/chat/completions\",\n completion_window: \"24h\"\n });\n\n console.log(batch);\n}\n\nmain();\n" + response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"validating\",\n \"output_file_id\": null,\n \"error_file_id\": null,\n 
\"created_at\": 1711471533,\n \"in_progress_at\": null,\n \"expires_at\": null,\n \"finalizing_at\": null,\n \"completed_at\": null,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 0,\n \"completed\": 0,\n \"failed\": 0\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" + get: tags: - - Files - summary: Delete a file. - operationId: deleteFile + - Batch + summary: List your organization's batches. + operationId: listBatches parameters: - - name: file_id - in: path - description: The ID of the file to use for this request. - required: true + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + schema: + type: integer + default: 20 responses: '200': - description: OK + description: Batch listed successfully. content: application/json: schema: - $ref: '#/components/schemas/DeleteFileResponse' + $ref: '#/components/schemas/ListBatchesResponse' x-oaiMeta: - name: Delete file - group: files - returns: Deletion status. + name: List batch + group: batch + returns: 'A list of paginated [Batch](/docs/api-reference/batch/object) objects.' 
examples: request: - curl: "curl https://api.openai.com/v1/files/file-abc123 \\\n -X DELETE \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.delete(\"file-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.del(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"deleted\": true\n}\n" + curl: "curl https://api.openai.com/v1/batches?limit=2 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.list()\n" + node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.batches.list();\n\n for await (const batch of list) {\n console.log(batch);\n }\n}\n\nmain();\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly job\",\n }\n },\n { ... 
},\n ],\n \"first_id\": \"batch_abc123\",\n \"last_id\": \"batch_abc456\",\n \"has_more\": true\n}\n" + '/batches/{batch_id}': get: tags: - - Files - summary: Returns information about a specific file. - operationId: retrieveFile + - Batch + summary: Retrieves a batch. + operationId: retrieveBatch parameters: - - name: file_id + - name: batch_id in: path - description: The ID of the file to use for this request. + description: The ID of the batch to retrieve. required: true schema: type: string responses: '200': - description: OK + description: Batch retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/OpenAIFile' + $ref: '#/components/schemas/Batch' x-oaiMeta: - name: Retrieve file - group: files - returns: 'The [File](/docs/api-reference/files/object) object matching the specified ID.' + name: Retrieve batch + group: batch + returns: 'The [Batch](/docs/api-reference/batch/object) object matching the specified ID.' examples: request: - curl: "curl https://api.openai.com/v1/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.retrieve(\"file-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.retrieve(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"mydata.jsonl\",\n \"purpose\": \"fine-tune\",\n}\n" - '/files/{file_id}/content': - get: + curl: "curl https://api.openai.com/v1/batches/batch_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.retrieve(\"batch_abc123\")\n" + node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch 
= await openai.batches.retrieve(\"batch_abc123\");\n\n console.log(batch);\n}\n\nmain();\n" + response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" + '/batches/{batch_id}/cancel': + post: tags: - - Files - summary: Returns the contents of the specified file. - operationId: downloadFile + - Batch + summary: 'Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file.' + operationId: cancelBatch parameters: - - name: file_id + - name: batch_id in: path - description: The ID of the file to use for this request. + description: The ID of the batch to cancel. required: true schema: type: string responses: '200': - description: OK + description: Batch is cancelling. Returns the cancelling batch's details. content: - application/octet-stream: + application/json: schema: - type: string - format: binary + $ref: '#/components/schemas/Batch' x-oaiMeta: - name: Retrieve file content - group: files - returns: The file content. + name: Cancel batch + group: batch + returns: 'The [Batch](/docs/api-reference/batch/object) object matching the specified ID.' 
examples: request: - curl: "curl https://api.openai.com/v1/files/file-abc123/content \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" > file.jsonl\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncontent = client.files.content(\"file-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.content(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();\n" - /uploads: + curl: "curl https://api.openai.com/v1/batches/batch_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -X POST\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.cancel(\"batch_abc123\")\n" + node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.cancel(\"batch_abc123\");\n\n console.log(batch);\n}\n\nmain();\n" + response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"cancelling\",\n \"output_file_id\": null,\n \"error_file_id\": null,\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": null,\n \"completed_at\": null,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": 1711475133,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 23,\n \"failed\": 1\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" + /chat/completions: post: tags: - - Uploads - summary: "Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. 
Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.\n\nOnce you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.\n\nFor certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:\n- [Assistants](/docs/assistants/tools/file-search/supported-files)\n\nFor guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create).\n" - operationId: createUpload + - Chat + summary: "Creates a model response for the given chat conversation. Learn more in the\n[text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),\nand [audio](/docs/guides/audio) guides.\n" + operationId: createChatCompletion requestBody: content: application/json: schema: - $ref: '#/components/schemas/CreateUploadRequest' + $ref: '#/components/schemas/CreateChatCompletionRequest' required: true responses: '200': @@ -494,34 +472,54 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Upload' + $ref: '#/components/schemas/CreateChatCompletionResponse' x-oaiMeta: - name: Create upload - group: uploads - returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `pending`.' 
+ name: Create chat completion + group: chat + returns: "Returns a [chat completion](/docs/api-reference/chat/object) object, or a streamed sequence of [chat completion chunk](/docs/api-reference/chat/streaming) objects if the request is streamed.\n" + path: create examples: - request: - curl: "curl https://api.openai.com/v1/uploads \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"purpose\": \"fine-tune\",\n \"filename\": \"training_examples.jsonl\",\n \"bytes\": 2147483648,\n \"mime_type\": \"text/jsonl\"\n }'\n" - response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"pending\",\n \"expires_at\": 1719127296\n}\n" - '/uploads/{upload_id}/parts': - post: - tags: - - Uploads - summary: "Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. \n\nEach Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB.\n\nIt is possible to add multiple Parts in parallel. 
You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete).\n" - operationId: addUploadPart - parameters: - - name: upload_id - in: path - description: "The ID of the Upload.\n" - required: true - schema: - type: string - example: upload_abc123 + - title: Default + request: + curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ]\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ]\n)\n\nprint(completion.choices[0].message)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"system\", content: \"You are a helpful assistant.\" }],\n model: \"VAR_model_id\",\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" + response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nHello there, how may I assist you today?\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" + - title: Image input + request: + curl: "curl https://api.openai.com/v1/chat/completions \\\n -H 
\"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"What'\\''s in this image?\"\n },\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n }\n }\n ]\n }\n ],\n \"max_tokens\": 300\n }'\n" + python: "from openai import OpenAI\n\nclient = OpenAI()\n\nresponse = client.chat.completions.create(\n model=\"gpt-4o\",\n messages=[\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": \"What's in this image?\"},\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n }\n },\n ],\n }\n ],\n max_tokens=300,\n)\n\nprint(response.choices[0])\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.chat.completions.create({\n model: \"gpt-4o\",\n messages: [\n {\n role: \"user\",\n content: [\n { type: \"text\", text: \"What's in this image?\" },\n {\n type: \"image_url\",\n image_url: {\n \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n },\n }\n ],\n },\n ],\n });\n console.log(response.choices[0]);\n}\nmain();" + response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush 
green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" + - title: Streaming + request: + curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"stream\": true\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream=True\n)\n\nfor chunk in completion:\n print(chunk.choices[0].delta)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n model: \"VAR_model_id\",\n messages: [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream: true,\n });\n\n for await (const chunk of completion) {\n console.log(chunk.choices[0].delta.content);\n }\n}\n\nmain();" + response: "{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"role\":\"assistant\",\"content\":\"\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", 
\"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n....\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{},\"logprobs\":null,\"finish_reason\":\"stop\"}]}\n" + - title: Functions + request: + curl: "curl https://api.openai.com/v1/chat/completions \\\n-H \"Content-Type: application/json\" \\\n-H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n-d '{\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"What'\\''s the weather like in Boston today?\"\n }\n ],\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"tool_choice\": \"auto\"\n}'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\nmessages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}]\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=messages,\n tools=tools,\n tool_choice=\"auto\"\n)\n\nprint(completion)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const messages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}];\n const tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n ];\n\n const response = await openai.chat.completions.create({\n model: \"gpt-4o\",\n messages: messages,\n tools: tools,\n tool_choice: \"auto\",\n });\n\n console.log(response);\n}\n\nmain();" + response: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" + - title: Logprobs + 
request: + curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"logprobs\": true,\n \"top_logprobs\": 2\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n logprobs=True,\n top_logprobs=2\n)\n\nprint(completion.choices[0].message)\nprint(completion.choices[0].logprobs)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"user\", content: \"Hello!\" }],\n model: \"VAR_model_id\",\n logprobs: true,\n top_logprobs: 2,\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" + response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1702685778,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hello! 
How can I assist you today?\"\n },\n \"logprobs\": {\n \"content\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111],\n \"top_logprobs\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111]\n },\n {\n \"token\": \"Hi\",\n \"logprob\": -1.3190403,\n \"bytes\": [72, 105]\n }\n ]\n },\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [\n 33\n ],\n \"top_logprobs\": [\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [33]\n },\n {\n \"token\": \" there\",\n \"logprob\": -3.787621,\n \"bytes\": [32, 116, 104, 101, 114, 101]\n }\n ]\n },\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119],\n \"top_logprobs\": [\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119]\n },\n {\n \"token\": \"<|end|>\",\n \"logprob\": -10.953937,\n \"bytes\": null\n }\n ]\n },\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110],\n \"top_logprobs\": [\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110]\n },\n {\n \"token\": \" may\",\n \"logprob\": -4.161023,\n \"bytes\": [32, 109, 97, 121]\n }\n ]\n },\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [\n 32,\n 73\n ],\n \"top_logprobs\": [\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [32, 73]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -13.596657,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n }\n ]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116],\n \"top_logprobs\": [\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n },\n {\n \"token\": \" help\",\n \"logprob\": -3.1089056,\n \"bytes\": [32, 104, 101, 108, 112]\n }\n ]\n },\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 
117],\n \"top_logprobs\": [\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 117]\n },\n {\n \"token\": \" today\",\n \"logprob\": -12.807695,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n }\n ]\n },\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121],\n \"top_logprobs\": [\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n },\n {\n \"token\": \"?\",\n \"logprob\": -5.5247097,\n \"bytes\": [63]\n }\n ]\n },\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63],\n \"top_logprobs\": [\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63]\n },\n {\n \"token\": \"?\\n\",\n \"logprob\": -7.184561,\n \"bytes\": [63, 10]\n }\n ]\n }\n ]\n },\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 9,\n \"total_tokens\": 18,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": null\n}\n" + /completions: + post: + tags: + - Completions + summary: Creates a completion for the provided prompt and parameters. + operationId: createCompletion requestBody: content: - multipart/form-data: + application/json: schema: - $ref: '#/components/schemas/AddUploadPartRequest' + $ref: '#/components/schemas/CreateCompletionRequest' required: true responses: '200': @@ -529,34 +527,93 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/UploadPart' + $ref: '#/components/schemas/CreateCompletionResponse' x-oaiMeta: - name: Add upload part - group: uploads - returns: 'The upload [Part](/docs/api-reference/uploads/part-object) object.' 
+ name: Create completion + group: completions + returns: "Returns a [completion](/docs/api-reference/completions/object) object, or a sequence of completion objects if the request is streamed.\n" + legacy: true examples: - request: - curl: "curl https://api.openai.com/v1/uploads/upload_abc123/parts\n -F data=\"aHR0cHM6Ly9hcGkub3BlbmFpLmNvbS92MS91cGxvYWRz...\"\n" - response: "{\n \"id\": \"part_def456\",\n \"object\": \"upload.part\",\n \"created_at\": 1719185911,\n \"upload_id\": \"upload_abc123\"\n}\n" - '/uploads/{upload_id}/complete': + - title: No streaming + request: + curl: "curl https://api.openai.com/v1/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 7,\n \"temperature\": 0\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.completions.create(\n model=\"VAR_model_id\",\n prompt=\"Say this is a test\",\n max_tokens=7,\n temperature=0\n)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.completions.create({\n model: \"VAR_model_id\",\n prompt: \"Say this is a test.\",\n max_tokens: 7,\n temperature: 0,\n });\n\n console.log(completion);\n}\nmain();" + response: "{\n \"id\": \"cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7\",\n \"object\": \"text_completion\",\n \"created\": 1589478378,\n \"model\": \"VAR_model_id\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [\n {\n \"text\": \"\\n\\nThis is indeed a test\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": \"length\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 5,\n \"completion_tokens\": 7,\n \"total_tokens\": 12\n }\n}\n" + - title: Streaming + request: + curl: "curl https://api.openai.com/v1/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": 
\"VAR_model_id\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 7,\n \"temperature\": 0,\n \"stream\": true\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nfor chunk in client.completions.create(\n model=\"VAR_model_id\",\n prompt=\"Say this is a test\",\n max_tokens=7,\n temperature=0,\n stream=True\n):\n print(chunk.choices[0].text)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.completions.create({\n model: \"VAR_model_id\",\n prompt: \"Say this is a test.\",\n stream: true,\n });\n\n for await (const chunk of stream) {\n console.log(chunk.choices[0].text)\n }\n}\nmain();" + response: "{\n \"id\": \"cmpl-7iA7iJjj8V2zOkCGvWF2hAkDWBQZe\",\n \"object\": \"text_completion\",\n \"created\": 1690759702,\n \"choices\": [\n {\n \"text\": \"This\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": null\n }\n ],\n \"model\": \"gpt-3.5-turbo-instruct\"\n \"system_fingerprint\": \"fp_44709d6fcb\",\n}\n" + /embeddings: post: tags: - - Uploads - summary: "Completes the [Upload](/docs/api-reference/uploads/object). \n\nWithin the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform.\n\nYou can specify the order of the Parts by passing in an ordered list of the Part IDs.\n\nThe number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed.\n" - operationId: completeUpload + - Embeddings + summary: Creates an embedding vector representing the input text. 
+ operationId: createEmbedding + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEmbeddingRequest' + required: true + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/CreateEmbeddingResponse' + x-oaiMeta: + name: Create embeddings + group: embeddings + returns: 'A list of [embedding](/docs/api-reference/embeddings/object) objects.' + examples: + request: + curl: "curl https://api.openai.com/v1/embeddings \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"input\": \"The food was delicious and the waiter...\",\n \"model\": \"text-embedding-ada-002\",\n \"encoding_format\": \"float\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.embeddings.create(\n model=\"text-embedding-ada-002\",\n input=\"The food was delicious and the waiter...\",\n encoding_format=\"float\"\n)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const embedding = await openai.embeddings.create({\n model: \"text-embedding-ada-002\",\n input: \"The quick brown fox jumped over the lazy dog\",\n encoding_format: \"float\",\n });\n\n console.log(embedding);\n}\n\nmain();" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"embedding\",\n \"embedding\": [\n 0.0023064255,\n -0.009327292,\n .... (1536 floats total for ada-002)\n -0.0028842222,\n ],\n \"index\": 0\n }\n ],\n \"model\": \"text-embedding-ada-002\",\n \"usage\": {\n \"prompt_tokens\": 8,\n \"total_tokens\": 8\n }\n}\n" + /files: + get: + tags: + - Files + summary: Returns a list of files that belong to the user's organization. + operationId: listFiles parameters: - - name: upload_id - in: path - description: "The ID of the Upload.\n" - required: true + - name: purpose + in: query + description: Only return files with the given purpose. 
schema: type: string - example: upload_abc123 + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ListFilesResponse' + x-oaiMeta: + name: List files + group: files + returns: 'A list of [File](/docs/api-reference/files/object) objects.' + examples: + request: + curl: "curl https://api.openai.com/v1/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.list()\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.files.list();\n\n for await (const file of list) {\n console.log(file);\n }\n}\n\nmain();" + response: "{\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 175,\n \"created_at\": 1613677385,\n \"filename\": \"salesOverview.pdf\",\n \"purpose\": \"assistants\",\n },\n {\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 140,\n \"created_at\": 1613779121,\n \"filename\": \"puppy.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n ],\n \"object\": \"list\"\n}\n" + post: + tags: + - Files + summary: "Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.\n\nThe Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details.\n\nThe Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models.\n\nThe Batch API only supports `.jsonl` files up to 100 MB in size. 
The input also has a specific required [format](/docs/api-reference/batch/request-input).\n\nPlease [contact us](https://help.openai.com/) if you need to increase these storage limits.\n" + operationId: createFile requestBody: content: - application/json: + multipart/form-data: schema: - $ref: '#/components/schemas/CompleteUploadRequest' + $ref: '#/components/schemas/CreateFileRequest' required: true responses: '200': @@ -564,44 +621,106 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Upload' + $ref: '#/components/schemas/OpenAIFile' x-oaiMeta: - name: Complete upload - group: uploads - returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `completed` with an additional `file` property containing the created usable File object.' + name: Upload file + group: files + returns: 'The uploaded [File](/docs/api-reference/files/object) object.' examples: request: - curl: "curl https://api.openai.com/v1/uploads/upload_abc123/complete\n -d '{\n \"part_ids\": [\"part_def456\", \"part_ghi789\"]\n }'\n" - response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"completed\",\n \"expires_at\": 1719127296,\n \"file\": {\n \"id\": \"file-xyz321\",\n \"object\": \"file\",\n \"bytes\": 2147483648,\n \"created_at\": 1719186911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n}\n" - '/uploads/{upload_id}/cancel': - post: + curl: "curl https://api.openai.com/v1/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F purpose=\"fine-tune\" \\\n -F file=\"@mydata.jsonl\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.create(\n file=open(\"mydata.jsonl\", \"rb\"),\n purpose=\"fine-tune\"\n)\n" + node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n 
const file = await openai.files.create({\n file: fs.createReadStream(\"mydata.jsonl\"),\n purpose: \"fine-tune\",\n });\n\n console.log(file);\n}\n\nmain();" + response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"mydata.jsonl\",\n \"purpose\": \"fine-tune\",\n}\n" + '/files/{file_id}': + delete: tags: - - Uploads - summary: "Cancels the Upload. No Parts may be added after an Upload is cancelled.\n" - operationId: cancelUpload + - Files + summary: Delete a file. + operationId: deleteFile parameters: - - name: upload_id + - name: file_id in: path - description: "The ID of the Upload.\n" + description: The ID of the file to use for this request. required: true schema: type: string - example: upload_abc123 responses: '200': description: OK content: application/json: schema: - $ref: '#/components/schemas/Upload' + $ref: '#/components/schemas/DeleteFileResponse' x-oaiMeta: - name: Cancel upload - group: uploads - returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `cancelled`.' + name: Delete file + group: files + returns: Deletion status. 
examples: request: - curl: "curl https://api.openai.com/v1/uploads/upload_abc123/cancel\n" - response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"cancelled\",\n \"expires_at\": 1719127296\n}\n" + curl: "curl https://api.openai.com/v1/files/file-abc123 \\\n -X DELETE \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.delete(\"file-abc123\")\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.del(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();" + response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"deleted\": true\n}\n" + get: + tags: + - Files + summary: Returns information about a specific file. + operationId: retrieveFile + parameters: + - name: file_id + in: path + description: The ID of the file to use for this request. + required: true + schema: + type: string + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIFile' + x-oaiMeta: + name: Retrieve file + group: files + returns: 'The [File](/docs/api-reference/files/object) object matching the specified ID.' 
+ examples: + request: + curl: "curl https://api.openai.com/v1/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.retrieve(\"file-abc123\")\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.retrieve(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();" + response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"mydata.jsonl\",\n \"purpose\": \"fine-tune\",\n}\n" + '/files/{file_id}/content': + get: + tags: + - Files + summary: Returns the contents of the specified file. + operationId: downloadFile + parameters: + - name: file_id + in: path + description: The ID of the file to use for this request. + required: true + schema: + type: string + responses: + '200': + description: OK + content: + application/octet-stream: + schema: + type: string + format: binary + x-oaiMeta: + name: Retrieve file content + group: files + returns: The file content. 
+ examples: + request: + curl: "curl https://api.openai.com/v1/files/file-abc123/content \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" > file.jsonl\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ncontent = client.files.content(\"file-abc123\")\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.content(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();\n" /fine_tuning/jobs: post: tags: @@ -713,58 +832,16 @@ paths: python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.retrieve(\"ftjob-abc123\")\n" node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.retrieve(\"ftjob-abc123\");\n\n console.log(fineTune);\n}\n\nmain();\n" response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"davinci-002\",\n \"created_at\": 1692661014,\n \"finished_at\": 1692661190,\n \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n \"organization_id\": \"org-123\",\n \"result_files\": [\n \"file-abc123\"\n ],\n \"status\": \"succeeded\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n },\n \"trained_tokens\": 5768,\n \"integrations\": [],\n \"seed\": 0,\n \"estimated_finish\": 0\n}\n" - '/fine_tuning/jobs/{fine_tuning_job_id}/events': - get: + '/fine_tuning/jobs/{fine_tuning_job_id}/cancel': + post: tags: - Fine-tuning - summary: "Get status updates for a fine-tuning job.\n" - operationId: listFineTuningEvents + summary: "Immediately cancel a fine-tune job.\n" + operationId: cancelFineTuningJob parameters: - name: fine_tuning_job_id in: path - description: "The ID of the fine-tuning job to get events for.\n" - required: true - schema: - type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - - 
name: after - in: query - description: Identifier for the last event from the previous pagination request. - schema: - type: string - - name: limit - in: query - description: Number of events to retrieve. - schema: - type: integer - default: 20 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListFineTuningJobEventsResponse' - x-oaiMeta: - name: List fine-tuning events - group: fine-tuning - returns: A list of fine-tuning event objects. - examples: - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/events \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.list_events(\n fine_tuning_job_id=\"ftjob-abc123\",\n limit=2\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.fineTuning.list_events(id=\"ftjob-abc123\", limit=2);\n\n for await (const fineTune of list) {\n console.log(fineTune);\n }\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ft-event-ddTJfwuMVpfLXseO0Am0Gqjm\",\n \"created_at\": 1721764800,\n \"level\": \"info\",\n \"message\": \"Fine tuning job successfully completed\",\n \"data\": null,\n \"type\": \"message\"\n },\n {\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ft-event-tyiGuB72evQncpH87xe505Sv\",\n \"created_at\": 1721764800,\n \"level\": \"info\",\n \"message\": \"New fine-tuned model created: ft:gpt-4o-mini:openai::7p4lURel\",\n \"data\": null,\n \"type\": \"message\"\n }\n ],\n \"has_more\": true\n}\n" - '/fine_tuning/jobs/{fine_tuning_job_id}/cancel': - post: - tags: - - Fine-tuning - summary: "Immediately cancel a fine-tune job.\n" - operationId: cancelFineTuningJob - parameters: - - name: fine_tuning_job_id - in: path - description: "The ID of the fine-tuning job to cancel.\n" + description: "The ID of 
the fine-tuning job to cancel.\n" required: true schema: type: string @@ -826,6 +903,135 @@ paths: request: curl: "curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/checkpoints \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" response: "{\n \"object\": \"list\"\n \"data\": [\n {\n \"object\": \"fine_tuning.job.checkpoint\",\n \"id\": \"ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB\",\n \"created_at\": 1721764867,\n \"fine_tuned_model_checkpoint\": \"ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:96olL566:ckpt-step-2000\",\n \"metrics\": {\n \"full_valid_loss\": 0.134,\n \"full_valid_mean_token_accuracy\": 0.874\n },\n \"fine_tuning_job_id\": \"ftjob-abc123\",\n \"step_number\": 2000,\n },\n {\n \"object\": \"fine_tuning.job.checkpoint\",\n \"id\": \"ftckpt_enQCFmOTGj3syEpYVhBRLTSy\",\n \"created_at\": 1721764800,\n \"fine_tuned_model_checkpoint\": \"ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000\",\n \"metrics\": {\n \"full_valid_loss\": 0.167,\n \"full_valid_mean_token_accuracy\": 0.781\n },\n \"fine_tuning_job_id\": \"ftjob-abc123\",\n \"step_number\": 1000,\n },\n ],\n \"first_id\": \"ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB\",\n \"last_id\": \"ftckpt_enQCFmOTGj3syEpYVhBRLTSy\",\n \"has_more\": true\n}\n" + '/fine_tuning/jobs/{fine_tuning_job_id}/events': + get: + tags: + - Fine-tuning + summary: "Get status updates for a fine-tuning job.\n" + operationId: listFineTuningEvents + parameters: + - name: fine_tuning_job_id + in: path + description: "The ID of the fine-tuning job to get events for.\n" + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + - name: after + in: query + description: Identifier for the last event from the previous pagination request. + schema: + type: string + - name: limit + in: query + description: Number of events to retrieve. 
+ schema: + type: integer + default: 20 + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ListFineTuningJobEventsResponse' + x-oaiMeta: + name: List fine-tuning events + group: fine-tuning + returns: A list of fine-tuning event objects. + examples: + request: + curl: "curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/events \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.list_events(\n fine_tuning_job_id=\"ftjob-abc123\",\n limit=2\n)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.fineTuning.list_events(id=\"ftjob-abc123\", limit=2);\n\n for await (const fineTune of list) {\n console.log(fineTune);\n }\n}\n\nmain();" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ft-event-ddTJfwuMVpfLXseO0Am0Gqjm\",\n \"created_at\": 1721764800,\n \"level\": \"info\",\n \"message\": \"Fine tuning job successfully completed\",\n \"data\": null,\n \"type\": \"message\"\n },\n {\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ft-event-tyiGuB72evQncpH87xe505Sv\",\n \"created_at\": 1721764800,\n \"level\": \"info\",\n \"message\": \"New fine-tuned model created: ft:gpt-4o-mini:openai::7p4lURel\",\n \"data\": null,\n \"type\": \"message\"\n }\n ],\n \"has_more\": true\n}\n" + /images/edits: + post: + tags: + - Images + summary: Creates an edited or extended image given an original image and a prompt. 
+ operationId: createImageEdit + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateImageEditRequest' + required: true + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ImagesResponse' + x-oaiMeta: + name: Create image edit + group: images + returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' + examples: + request: + curl: "curl https://api.openai.com/v1/images/edits \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F image=\"@otter.png\" \\\n -F mask=\"@mask.png\" \\\n -F prompt=\"A cute baby sea otter wearing a beret\" \\\n -F n=2 \\\n -F size=\"1024x1024\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.images.edit(\n image=open(\"otter.png\", \"rb\"),\n mask=open(\"mask.png\", \"rb\"),\n prompt=\"A cute baby sea otter wearing a beret\",\n n=2,\n size=\"1024x1024\"\n)\n" + node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.edit({\n image: fs.createReadStream(\"otter.png\"),\n mask: fs.createReadStream(\"mask.png\"),\n prompt: \"A cute baby sea otter wearing a beret\",\n });\n\n console.log(image.data);\n}\nmain();" + response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" + /images/generations: + post: + tags: + - Images + summary: Creates an image given a prompt. + operationId: createImage + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateImageRequest' + required: true + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ImagesResponse' + x-oaiMeta: + name: Create image + group: images + returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' 
+ examples: + request: + curl: "curl https://api.openai.com/v1/images/generations \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"dall-e-3\",\n \"prompt\": \"A cute baby sea otter\",\n \"n\": 1,\n \"size\": \"1024x1024\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.images.generate(\n model=\"dall-e-3\",\n prompt=\"A cute baby sea otter\",\n n=1,\n size=\"1024x1024\"\n)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.generate({ model: \"dall-e-3\", prompt: \"A cute baby sea otter\" });\n\n console.log(image.data);\n}\nmain();" + response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" + /images/variations: + post: + tags: + - Images + summary: Creates a variation of a given image. + operationId: createImageVariation + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateImageVariationRequest' + required: true + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ImagesResponse' + x-oaiMeta: + name: Create image variation + group: images + returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' 
+ examples: + request: + curl: "curl https://api.openai.com/v1/images/variations \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F image=\"@otter.png\" \\\n -F n=2 \\\n -F size=\"1024x1024\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.images.create_variation(\n image=open(\"image_edit_original.png\", \"rb\"),\n n=2,\n size=\"1024x1024\"\n)\n" + node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.createVariation({\n image: fs.createReadStream(\"otter.png\"),\n });\n\n console.log(image.data);\n}\nmain();" + response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" /models: get: tags: @@ -946,28 +1152,72 @@ paths: python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.moderations.create(\n model=\"omni-moderation-latest\",\n input=[\n {\"type\": \"text\", \"text\": \"...text to classify goes here...\"},\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": \"https://example.com/image.png\",\n # can also use base64 encoded image URLs\n # \"url\": \"data:image/jpeg;base64,abcdefg...\"\n }\n },\n ],\n)\n\nprint(response)\n" node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nconst moderation = await openai.moderations.create({\n model: \"omni-moderation-latest\",\n input: [\n { type: \"text\", text: \"...text to classify goes here...\" },\n {\n type: \"image_url\",\n image_url: {\n url: \"https://example.com/image.png\"\n // can also use base64 encoded image URLs\n // url: \"data:image/jpeg;base64,abcdefg...\"\n }\n }\n ],\n});\n\nconsole.log(moderation);\n" response: "{\n \"id\": \"modr-0d9740456c391e43c445bf0f010940c7\",\n \"model\": \"omni-moderation-latest\",\n \"results\": [\n {\n \"flagged\": true,\n \"categories\": {\n \"harassment\": true,\n \"harassment/threatening\": true,\n \"sexual\": 
false,\n \"hate\": false,\n \"hate/threatening\": false,\n \"illicit\": false,\n \"illicit/violent\": false,\n \"self-harm/intent\": false,\n \"self-harm/instructions\": false,\n \"self-harm\": false,\n \"sexual/minors\": false,\n \"violence\": true,\n \"violence/graphic\": true\n },\n \"category_scores\": {\n \"harassment\": 0.8189693396524255,\n \"harassment/threatening\": 0.804985420696006,\n \"sexual\": 1.573112165348997e-6,\n \"hate\": 0.007562942636942845,\n \"hate/threatening\": 0.004208854591835476,\n \"illicit\": 0.030535955153511665,\n \"illicit/violent\": 0.008925306722380033,\n \"self-harm/intent\": 0.00023023930975076432,\n \"self-harm/instructions\": 0.0002293869201073356,\n \"self-harm\": 0.012598046106750154,\n \"sexual/minors\": 2.212566909570261e-8,\n \"violence\": 0.9999992735124786,\n \"violence/graphic\": 0.843064871157054\n },\n \"category_applied_input_types\": {\n \"harassment\": [\n \"text\"\n ],\n \"harassment/threatening\": [\n \"text\"\n ],\n \"sexual\": [\n \"text\",\n \"image\"\n ],\n \"hate\": [\n \"text\"\n ],\n \"hate/threatening\": [\n \"text\"\n ],\n \"illicit\": [\n \"text\"\n ],\n \"illicit/violent\": [\n \"text\"\n ],\n \"self-harm/intent\": [\n \"text\",\n \"image\"\n ],\n \"self-harm/instructions\": [\n \"text\",\n \"image\"\n ],\n \"self-harm\": [\n \"text\",\n \"image\"\n ],\n \"sexual/minors\": [\n \"text\"\n ],\n \"violence\": [\n \"text\",\n \"image\"\n ],\n \"violence/graphic\": [\n \"text\",\n \"image\"\n ]\n }\n }\n ]\n}\n" - /assistants: + /organization/audit_logs: get: tags: - - Assistants - summary: Returns a list of assistants. - operationId: listAssistants + - Audit Logs + summary: List user actions and configuration changes within this organization. + operationId: list-audit-logs parameters: + - name: effective_at + in: query + description: Return only events whose `effective_at` (Unix seconds) is in this range. 
+ schema: + type: object + properties: + gt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater than this value. + gte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater than or equal to this value. + lt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than this value. + lte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than or equal to this value. + - name: 'project_ids[]' + in: query + description: Return only events for these projects. + schema: + type: array + items: + type: string + - name: 'event_types[]' + in: query + description: 'Return only events with a `type` in one of these values. For example, `project.created`. For all options, see the documentation for the [audit log object](/docs/api-reference/audit-logs/object).' + schema: + type: array + items: + $ref: '#/components/schemas/AuditLogEventType' + - name: 'actor_ids[]' + in: query + description: 'Return only events performed by these actors. Can be a user ID, a service account ID, or an api key tracking ID.' + schema: + type: array + items: + type: string + - name: 'actor_emails[]' + in: query + description: Return only events performed by users with these emails. + schema: + type: array + items: + type: string + - name: 'resource_ids[]' + in: query + description: 'Return only events performed on these targets. For example, a project ID updated.' + schema: + type: array + items: + type: string - name: limit in: query description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" schema: type: integer default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. 
`asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - name: after in: query description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" @@ -980,298 +1230,285 @@ paths: type: string responses: '200': - description: OK + description: Audit logs listed successfully. content: application/json: schema: - $ref: '#/components/schemas/ListAssistantsResponse' + $ref: '#/components/schemas/ListAuditLogsResponse' x-oaiMeta: - name: List assistants - group: assistants - beta: true - returns: 'A list of [assistant](/docs/api-reference/assistants/object) objects.' + name: List audit logs + group: audit-logs + returns: 'A list of paginated [Audit Log](/docs/api-reference/audit-logs/object) objects.' examples: request: - curl: "curl \"https://api.openai.com/v1/assistants?order=desc&limit=20\" \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistants = client.beta.assistants.list(\n order=\"desc\",\n limit=\"20\",\n)\nprint(my_assistants.data)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistants = await openai.beta.assistants.list({\n order: \"desc\",\n limit: \"20\",\n });\n\n console.log(myAssistants.data);\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698982736,\n \"name\": \"Coding Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n 
\"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc456\",\n \"object\": \"assistant\",\n \"created_at\": 1698982718,\n \"name\": \"My Assistant\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc789\",\n \"object\": \"assistant\",\n \"created_at\": 1698982643,\n \"name\": null,\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n }\n ],\n \"first_id\": \"asst_abc123\",\n \"last_id\": \"asst_abc789\",\n \"has_more\": false\n}\n" - post: + curl: "curl https://api.openai.com/v1/organization/audit_logs \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\" \\\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"audit_log-xxx_yyyymmdd\",\n \"type\": \"project.archived\",\n \"effective_at\": 1722461446,\n \"actor\": {\n \"type\": \"api_key\",\n \"api_key\": {\n \"type\": \"user\",\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n }\n }\n },\n \"project.archived\": {\n \"id\": \"proj_abc\"\n },\n },\n {\n \"id\": \"audit_log-yyy__20240101\",\n \"type\": \"api_key.updated\",\n \"effective_at\": 1720804190,\n \"actor\": {\n \"type\": \"session\",\n \"session\": {\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n },\n \"ip_address\": \"127.0.0.1\",\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n },\n \"api_key.updated\": {\n \"id\": \"key_xxxx\",\n \"data\": {\n 
\"scopes\": [\"resource_2.operation_2\"]\n }\n },\n }\n ],\n \"first_id\": \"audit_log-xxx__20240101\",\n \"last_id\": \"audit_log_yyy__20240101\",\n \"has_more\": true\n}\n" + /organization/invites: + get: tags: - - Assistants - summary: Create an assistant with a model and instructions. - operationId: createAssistant - requestBody: + - Invites + summary: Returns a list of invites in the organization. + operationId: list-invites + parameters: + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + schema: + type: integer + default: 20 + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + schema: + type: string + responses: + '200': + description: Invites listed successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/InviteListResponse' + x-oaiMeta: + name: List invites + group: administration + returns: 'A list of [Invite](/docs/api-reference/invite/object) objects.' + examples: + request: + curl: "curl https://api.openai.com/v1/organization/invites?after=invite-abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n }\n ],\n \"first_id\": \"invite-abc\",\n \"last_id\": \"invite-abc\",\n \"has_more\": false\n}\n" + post: + tags: + - Invites + summary: Create an invite for a user to the organization. 
The invite must be accepted by the user before they have access to the organization. + operationId: inviteUser + requestBody: + description: The invite request payload. content: application/json: schema: - $ref: '#/components/schemas/CreateAssistantRequest' + $ref: '#/components/schemas/InviteRequest' required: true responses: '200': - description: OK + description: User invited successfully. content: application/json: schema: - $ref: '#/components/schemas/AssistantObject' + $ref: '#/components/schemas/Invite' x-oaiMeta: - name: Create assistant - group: assistants - beta: true - returns: 'An [assistant](/docs/api-reference/assistants/object) object.' + name: Create invite + group: administration + returns: 'The created [Invite](/docs/api-reference/invite/object) object.' examples: - - title: Code Interpreter - request: - curl: "curl \"https://api.openai.com/v1/assistants\" \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n \"name\": \"Math Tutor\",\n \"tools\": [{\"type\": \"code_interpreter\"}],\n \"model\": \"gpt-4o\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.create(\n instructions=\"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n name=\"Math Tutor\",\n tools=[{\"type\": \"code_interpreter\"}],\n model=\"gpt-4o\",\n)\nprint(my_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.create({\n instructions:\n \"You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.\",\n name: \"Math Tutor\",\n tools: [{ type: \"code_interpreter\" }],\n model: \"gpt-4o\",\n });\n\n console.log(myAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698984975,\n \"name\": \"Math Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - - title: Files - request: - curl: "curl https://api.openai.com/v1/assistants \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [{\"type\": \"file_search\"}],\n \"tool_resources\": {\"file_search\": {\"vector_store_ids\": [\"vs_123\"]}},\n \"model\": \"gpt-4o\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.create(\n instructions=\"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n name=\"HR Helper\",\n tools=[{\"type\": \"file_search\"}],\n tool_resources={\"file_search\": {\"vector_store_ids\": [\"vs_123\"]}},\n model=\"gpt-4o\"\n)\nprint(my_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.create({\n instructions:\n \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n name: \"HR Helper\",\n tools: [{ type: \"file_search\" }],\n tool_resources: {\n file_search: {\n vector_store_ids: 
[\"vs_123\"]\n }\n },\n model: \"gpt-4o\"\n });\n\n console.log(myAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009403,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": [\"vs_123\"]\n }\n },\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - '/assistants/{assistant_id}': + request: + curl: "curl -X POST https://api.openai.com/v1/organization/invites \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"email\": \"user@example.com\",\n \"role\": \"owner\"\n }'\n" + response: + content: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": null\n}\n" + '/organization/invites/{invite_id}': get: tags: - - Assistants - summary: Retrieves an assistant. - operationId: getAssistant + - Invites + summary: Retrieves an invite. + operationId: retrieve-invite parameters: - - name: assistant_id + - name: invite_id in: path - description: The ID of the assistant to retrieve. + description: The ID of the invite to retrieve. required: true schema: type: string responses: '200': - description: OK + description: Invite retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/AssistantObject' + $ref: '#/components/schemas/Invite' x-oaiMeta: - name: Retrieve assistant - group: assistants - beta: true - returns: 'The [assistant](/docs/api-reference/assistants/object) object matching the specified ID.' 
+ name: Retrieve invite + group: administration + returns: 'The [Invite](/docs/api-reference/invite/object) object matching the specified ID.' examples: request: - curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.retrieve(\"asst_abc123\")\nprint(my_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.retrieve(\n \"asst_abc123\"\n );\n\n console.log(myAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009709,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - post: + curl: "curl https://api.openai.com/v1/organization/invites/invite-abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n}\n" + delete: tags: - - Assistants - summary: Modifies an assistant. - operationId: modifyAssistant + - Invites + summary: 'Delete an invite. If the invite has already been accepted, it cannot be deleted.' + operationId: delete-invite parameters: - - name: assistant_id + - name: invite_id in: path - description: The ID of the assistant to modify. 
+ description: The ID of the invite to delete. required: true schema: type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyAssistantRequest' - required: true responses: '200': - description: OK + description: Invite deleted successfully. content: application/json: schema: - $ref: '#/components/schemas/AssistantObject' + $ref: '#/components/schemas/InviteDeleteResponse' x-oaiMeta: - name: Modify assistant - group: assistants - beta: true - returns: 'The modified [assistant](/docs/api-reference/assistants/object) object.' + name: Delete invite + group: administration + returns: Confirmation that the invite has been deleted examples: request: - curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n \"tools\": [{\"type\": \"file_search\"}],\n \"model\": \"gpt-4o\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_updated_assistant = client.beta.assistants.update(\n \"asst_abc123\",\n instructions=\"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n name=\"HR Helper\",\n tools=[{\"type\": \"file_search\"}],\n model=\"gpt-4o\"\n)\n\nprint(my_updated_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myUpdatedAssistant = await openai.beta.assistants.update(\n \"asst_abc123\",\n {\n instructions:\n \"You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.\",\n name: \"HR Helper\",\n tools: [{ type: \"file_search\" }],\n model: \"gpt-4o\"\n }\n );\n\n console.log(myUpdatedAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009709,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": []\n }\n },\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - delete: + curl: "curl -X DELETE https://api.openai.com/v1/organization/invites/invite-abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.invite.deleted\",\n \"id\": \"invite-abc\",\n \"deleted\": true\n}\n" + /organization/projects: + get: tags: - - Assistants - summary: Delete an assistant. - operationId: deleteAssistant + - Projects + summary: Returns a list of projects. + operationId: list-projects parameters: - - name: assistant_id - in: path - description: The ID of the assistant to delete. - required: true + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + schema: + type: integer + default: 20 + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string + - name: include_archived + in: query + description: If `true` returns all projects including those that have been `archived`. Archived projects are not included by default. + schema: + type: boolean + default: false responses: '200': - description: OK + description: Projects listed successfully. content: application/json: schema: - $ref: '#/components/schemas/DeleteAssistantResponse' + $ref: '#/components/schemas/ProjectListResponse' x-oaiMeta: - name: Delete assistant - group: assistants - beta: true - returns: Deletion status + name: List projects + group: administration + returns: 'A list of [Project](/docs/api-reference/projects/object) objects.' examples: request: - curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.beta.assistants.delete(\"asst_abc123\")\nprint(response)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.beta.assistants.del(\"asst_abc123\");\n\n console.log(response);\n}\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant.deleted\",\n \"deleted\": true\n}\n" - /threads: + curl: "curl https://api.openai.com/v1/organization/projects?after=proj_abc&limit=20&include_archived=false \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n 
\"status\": \"active\"\n }\n ],\n \"first_id\": \"proj-abc\",\n \"last_id\": \"proj-xyz\",\n \"has_more\": false\n}\n" post: tags: - - Assistants - summary: Create a thread. - operationId: createThread + - Projects + summary: 'Create a new project in the organization. Projects can be created and archived, but cannot be deleted.' + operationId: create-project requestBody: + description: The project create request payload. content: application/json: schema: - $ref: '#/components/schemas/CreateThreadRequest' + $ref: '#/components/schemas/ProjectCreateRequest' + required: true responses: '200': - description: OK + description: Project created successfully. content: application/json: schema: - $ref: '#/components/schemas/ThreadObject' + $ref: '#/components/schemas/Project' x-oaiMeta: - name: Create thread - group: threads - beta: true - returns: 'A [thread](/docs/api-reference/threads) object.' + name: Create project + group: administration + returns: 'The created [Project](/docs/api-reference/projects/object) object.' 
examples: - - title: Empty - request: - curl: "curl https://api.openai.com/v1/threads \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d ''\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nempty_thread = client.beta.threads.create()\nprint(empty_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const emptyThread = await openai.beta.threads.create();\n\n console.log(emptyThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699012949,\n \"metadata\": {},\n \"tool_resources\": {}\n}\n" - - title: Messages - request: - curl: "curl https://api.openai.com/v1/threads \\\n-H \"Content-Type: application/json\" \\\n-H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n-H \"OpenAI-Beta: assistants=v2\" \\\n-d '{\n \"messages\": [{\n \"role\": \"user\",\n \"content\": \"Hello, what is AI?\"\n }, {\n \"role\": \"user\",\n \"content\": \"How does AI work? Explain it in simple terms.\"\n }]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage_thread = client.beta.threads.create(\n messages=[\n {\n \"role\": \"user\",\n \"content\": \"Hello, what is AI?\"\n },\n {\n \"role\": \"user\",\n \"content\": \"How does AI work? Explain it in simple terms.\"\n },\n ]\n)\n\nprint(message_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const messageThread = await openai.beta.threads.create({\n messages: [\n {\n role: \"user\",\n content: \"Hello, what is AI?\"\n },\n {\n role: \"user\",\n content: \"How does AI work? 
Explain it in simple terms.\",\n },\n ],\n });\n\n console.log(messageThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {},\n \"tool_resources\": {}\n}\n" - '/threads/{thread_id}': + request: + curl: "curl -X POST https://api.openai.com/v1/organization/projects \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Project ABC\"\n }'\n" + response: + content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project ABC\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" + '/organization/projects/{project_id}': get: tags: - - Assistants - summary: Retrieves a thread. - operationId: getThread + - Projects + summary: Retrieves a project. + operationId: retrieve-project parameters: - - name: thread_id + - name: project_id in: path - description: The ID of the thread to retrieve. + description: The ID of the project. required: true schema: type: string responses: '200': - description: OK + description: Project retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/ThreadObject' + $ref: '#/components/schemas/Project' x-oaiMeta: - name: Retrieve thread - group: threads - beta: true - returns: 'The [thread](/docs/api-reference/threads/object) object matching the specified ID.' + name: Retrieve project + group: administration + description: Retrieve a project. + returns: 'The [Project](/docs/api-reference/projects/object) object matching the specified ID.' 
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_thread = client.beta.threads.retrieve(\"thread_abc123\")\nprint(my_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myThread = await openai.beta.threads.retrieve(\n \"thread_abc123\"\n );\n\n console.log(myThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {},\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": []\n }\n }\n}\n" + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" post: tags: - - Assistants - summary: Modifies a thread. - operationId: modifyThread + - Projects + summary: Modifies a project in the organization. + operationId: modify-project parameters: - - name: thread_id + - name: project_id in: path - description: The ID of the thread to modify. Only the `metadata` can be modified. + description: The ID of the project. required: true schema: type: string requestBody: + description: The project update request payload. content: application/json: schema: - $ref: '#/components/schemas/ModifyThreadRequest' + $ref: '#/components/schemas/ProjectUpdateRequest' required: true responses: '200': - description: OK + description: Project updated successfully. 
content: application/json: schema: - $ref: '#/components/schemas/ThreadObject' - x-oaiMeta: - name: Modify thread - group: threads - beta: true - returns: 'The modified [thread](/docs/api-reference/threads/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_updated_thread = client.beta.threads.update(\n \"thread_abc123\",\n metadata={\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n)\nprint(my_updated_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const updatedThread = await openai.beta.threads.update(\n \"thread_abc123\",\n {\n metadata: { modified: \"true\", user: \"abc123\" },\n }\n );\n\n console.log(updatedThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n },\n \"tool_resources\": {}\n}\n" - delete: - tags: - - Assistants - summary: Delete a thread. - operationId: deleteThread - parameters: - - name: thread_id - in: path - description: The ID of the thread to delete. - required: true - schema: - type: string - responses: - '200': - description: OK + $ref: '#/components/schemas/Project' + '400': + description: Error response when updating the default project. 
content: application/json: schema: - $ref: '#/components/schemas/DeleteThreadResponse' + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: Delete thread - group: threads - beta: true - returns: Deletion status + name: Modify project + group: administration + returns: 'The updated [Project](/docs/api-reference/projects/object) object.' examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.beta.threads.delete(\"thread_abc123\")\nprint(response)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.beta.threads.del(\"thread_abc123\");\n\n console.log(response);\n}\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread.deleted\",\n \"deleted\": true\n}\n" - '/threads/{thread_id}/messages': + curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Project DEF\"\n }'\n" + '/organization/projects/{project_id}/api_keys': get: tags: - - Assistants - summary: Returns a list of messages for a given thread. - operationId: listMessages + - Projects + summary: Returns a list of API keys in the project. + operationId: list-project-api-keys parameters: - - name: thread_id + - name: project_id in: path - description: 'The ID of the [thread](/docs/api-reference/threads) the messages belong to.' + description: The ID of the project. required: true schema: type: string @@ -1281,705 +1518,677 @@ paths: schema: type: integer default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. 
`asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - name: after in: query description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - - name: run_id - in: query - description: "Filter messages by the run ID that generated them.\n" - schema: - type: string responses: '200': - description: OK + description: Project API keys listed successfully. content: application/json: schema: - $ref: '#/components/schemas/ListMessagesResponse' + $ref: '#/components/schemas/ProjectApiKeyListResponse' x-oaiMeta: - name: List messages - group: threads - beta: true - returns: 'A list of [message](/docs/api-reference/messages) objects.' + name: List project API keys + group: administration + returns: 'A list of [ProjectApiKey](/docs/api-reference/project-api-keys/object) objects.' 
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nthread_messages = client.beta.threads.messages.list(\"thread_abc123\")\nprint(thread_messages.data)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const threadMessages = await openai.beta.threads.messages.list(\n \"thread_abc123\"\n );\n\n console.log(threadMessages.data);\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699016383,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n },\n {\n \"id\": \"msg_abc456\",\n \"object\": \"thread.message\",\n \"created_at\": 1699016383,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"Hello, what is AI?\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n }\n ],\n \"first_id\": \"msg_abc123\",\n \"last_id\": \"msg_abc456\",\n \"has_more\": false\n}\n" - post: + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys?after=key_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": 
\"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n }\n ],\n \"first_id\": \"key_abc\",\n \"last_id\": \"key_xyz\",\n \"has_more\": false\n}\n" + error_response: + content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" + '/organization/projects/{project_id}/api_keys/{key_id}': + get: tags: - - Assistants - summary: Create a message. - operationId: createMessage + - Projects + summary: Retrieves an API key in the project. + operationId: retrieve-project-api-key parameters: - - name: thread_id + - name: project_id in: path - description: 'The ID of the [thread](/docs/api-reference/threads) to create a message for.' + description: The ID of the project. + required: true + schema: + type: string + - name: key_id + in: path + description: The ID of the API key. required: true schema: type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateMessageRequest' - required: true responses: '200': - description: OK + description: Project API key retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/MessageObject' + $ref: '#/components/schemas/ProjectApiKey' x-oaiMeta: - name: Create message - group: threads - beta: true - returns: 'A [message](/docs/api-reference/messages/object) object.' + name: Retrieve project API key + group: administration + returns: 'The [ProjectApiKey](/docs/api-reference/project-api-keys/object) object matching the specified ID.' examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"role\": \"user\",\n \"content\": \"How does AI work? 
Explain it in simple terms.\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nthread_message = client.beta.threads.messages.create(\n \"thread_abc123\",\n role=\"user\",\n content=\"How does AI work? Explain it in simple terms.\",\n)\nprint(thread_message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const threadMessages = await openai.beta.threads.messages.create(\n \"thread_abc123\",\n { role: \"user\", content: \"How does AI work? Explain it in simple terms.\" }\n );\n\n console.log(threadMessages);\n}\n\nmain();" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1713226573,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n}\n" - '/threads/{thread_id}/messages/{message_id}': - get: + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n}\n" + delete: tags: - - Assistants - summary: Retrieve a message. - operationId: getMessage + - Projects + summary: Deletes an API key from the project. 
+ operationId: delete-project-api-key parameters: - - name: thread_id + - name: project_id in: path - description: 'The ID of the [thread](/docs/api-reference/threads) to which this message belongs.' + description: The ID of the project. required: true schema: type: string - - name: message_id + - name: key_id in: path - description: The ID of the message to retrieve. + description: The ID of the API key. required: true schema: type: string responses: '200': - description: OK + description: Project API key deleted successfully. content: application/json: schema: - $ref: '#/components/schemas/MessageObject' + $ref: '#/components/schemas/ProjectApiKeyDeleteResponse' + '400': + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: Retrieve message - group: threads - beta: true - returns: 'The [message](/docs/api-reference/messages/object) object matching the specified ID.' + name: Delete project API key + group: administration + returns: Confirmation of the key's deletion or an error if the key belonged to a service account examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage = client.beta.threads.messages.retrieve(\n message_id=\"msg_abc123\",\n thread_id=\"thread_abc123\",\n)\nprint(message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const message = await openai.beta.threads.messages.retrieve(\n \"thread_abc123\",\n \"msg_abc123\"\n );\n\n console.log(message);\n}\n\nmain();" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699017614,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": 
null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n}\n" + curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.project.api_key.deleted\",\n \"id\": \"key_abc\",\n \"deleted\": true\n}\n" + error_response: + content: "{\n \"code\": 400,\n \"message\": \"API keys cannot be deleted for service accounts, please delete the service account\"\n}\n" + '/organization/projects/{project_id}/archive': post: tags: - - Assistants - summary: Modifies a message. - operationId: modifyMessage + - Projects + summary: Archives a project in the organization. Archived projects cannot be used or updated. + operationId: archive-project parameters: - - name: thread_id - in: path - description: The ID of the thread to which this message belongs. - required: true - schema: - type: string - - name: message_id + - name: project_id in: path - description: The ID of the message to modify. + description: The ID of the project. required: true schema: type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyMessageRequest' - required: true responses: '200': - description: OK + description: Project archived successfully. content: application/json: schema: - $ref: '#/components/schemas/MessageObject' + $ref: '#/components/schemas/Project' x-oaiMeta: - name: Modify message - group: threads - beta: true - returns: 'The modified [message](/docs/api-reference/messages/object) object.' + name: Archive project + group: administration + returns: 'The archived [Project](/docs/api-reference/projects/object) object.' 
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage = client.beta.threads.messages.update(\n message_id=\"msg_abc12\",\n thread_id=\"thread_abc123\",\n metadata={\n \"modified\": \"true\",\n \"user\": \"abc123\",\n },\n)\nprint(message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const message = await openai.beta.threads.messages.update(\n \"thread_abc123\",\n \"msg_abc123\",\n {\n metadata: {\n modified: \"true\",\n user: \"abc123\",\n },\n }\n }'" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699017614,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"file_ids\": [],\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n}\n" - delete: + curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/archive \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project DEF\",\n \"created_at\": 1711471533,\n \"archived_at\": 1711471533,\n \"status\": \"archived\"\n}\n" + '/organization/projects/{project_id}/service_accounts': + get: tags: - - Assistants - summary: Deletes a message. - operationId: deleteMessage + - Projects + summary: Returns a list of service accounts in the project. 
+ operationId: list-project-service-accounts parameters: - - name: thread_id + - name: project_id in: path - description: The ID of the thread to which this message belongs. + description: The ID of the project. required: true schema: type: string - - name: message_id - in: path - description: The ID of the message to delete. - required: true + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + schema: + type: integer + default: 20 + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string responses: '200': - description: OK + description: Project service accounts listed successfully. content: application/json: schema: - $ref: '#/components/schemas/DeleteMessageResponse' + $ref: '#/components/schemas/ProjectServiceAccountListResponse' + '400': + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: Delete message - group: threads - beta: true - returns: Deletion status + name: List project service accounts + group: administration + returns: 'A list of [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) objects.' 
examples: request: - curl: "curl -X DELETE https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_message = client.beta.threads.messages.delete(\n message_id=\"msg_abc12\",\n thread_id=\"thread_abc123\",\n)\nprint(deleted_message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedMessage = await openai.beta.threads.messages.del(\n \"thread_abc123\",\n \"msg_abc123\"\n );\n\n console.log(deletedMessage);\n}" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message.deleted\",\n \"deleted\": true\n}\n" - /threads/runs: + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts?after=custom_id&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n }\n ],\n \"first_id\": \"svc_acct_abc\",\n \"last_id\": \"svc_acct_xyz\",\n \"has_more\": false\n}\n" post: tags: - - Assistants - summary: Create a thread and run it in one request. - operationId: createThreadAndRun + - Projects + summary: Creates a new service account in the project. This also returns an unredacted API key for the service account. + operationId: create-project-service-account + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string requestBody: + description: The project service account create request payload. 
content: application/json: schema: - $ref: '#/components/schemas/CreateThreadAndRunRequest' + $ref: '#/components/schemas/ProjectServiceAccountCreateRequest' required: true responses: '200': - description: OK + description: Project service account created successfully. content: application/json: schema: - $ref: '#/components/schemas/RunObject' + $ref: '#/components/schemas/ProjectServiceAccountCreateResponse' + '400': + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: Create thread and run - group: threads - beta: true - returns: 'A [run](/docs/api-reference/runs/object) object.' + name: Create project service account + group: administration + returns: 'The created [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object.' examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Explain deep learning to a 5 year old.\"}\n ]\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.create_and_run(\n assistant_id=\"asst_abc123\",\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Explain deep learning to a 5 year old.\"}\n ]\n }\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_abc123\",\n thread: {\n messages: [\n { role: \"user\", content: \"Explain deep learning to a 5 year old.\" },\n ],\n },\n });\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699076792,\n 
\"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"queued\",\n \"started_at\": null,\n \"expires_at\": 1699077392,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"required_action\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant.\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_completion_tokens\": null,\n \"max_prompt_tokens\": null,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"incomplete_details\": null,\n \"usage\": null,\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello\"}\n ]\n },\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.create_and_run(\n assistant_id=\"asst_123\",\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello\"}\n ]\n },\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_123\",\n thread: {\n messages: [\n { role: \"user\", content: \"Hello\" },\n ],\n },\n stream: true\n });\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.created\ndata: {\"id\":\"thread_123\",\"object\":\"thread\",\"created_at\":1710348075,\"metadata\":{}}\n\nevent: thread.run.created\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.in_progress\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[], 
\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[], \"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710348077,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! 
How can I assist you today?\",\"annotations\":[]}}], \"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710348077,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\n{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1713226836,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1713226837,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":345,\"completion_tokens\":11,\"total_tokens\":356},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: done\ndata: [DONE]\n" - - title: Streaming with Functions - request: - curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"What is the weather like in San Francisco?\"}\n ]\n },\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given 
location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\n\nstream = client.beta.threads.create_and_run(\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"What is the weather like in San Francisco?\"}\n ]\n },\n assistant_id=\"asst_abc123\",\n tools=tools,\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nconst tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n];\n\nasync function main() {\n const stream = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_123\",\n thread: {\n messages: [\n { role: \"user\", content: \"What is the weather like in San Francisco?\" },\n ],\n },\n tools: tools,\n stream: true\n });\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.created\ndata: {\"id\":\"thread_123\",\"object\":\"thread\",\"created_at\":1710351818,\"metadata\":{}}\n\nevent: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710351818,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710351819,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710352418,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[]},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710351819,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710352418,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[]},\"usage\":null}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"id\":\"call_XXNp8YGaFrjrSjgqxtC8JJ1B\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"\",\"output\":null}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"{\\\"\"}}]}}}\n\nevent: thread.run.step.delta\ndata: 
{\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"location\"}}]}}}\n\n...\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"ahrenheit\"}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"\\\"}\"}}]}}}\n\nevent: thread.run.requires_action\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"requires_action\",\"started_at\":1710351818,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":{\"type\":\"submit_tool_outputs\",\"submit_tool_outputs\":{\"tool_calls\":[{\"id\":\"call_XXNp8YGaFrjrSjgqxtC8JJ1B\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"{\\\"location\\\":\\\"San Francisco, CA\\\",\\\"unit\\\":\\\"fahrenheit\\\"}\"}}]}},\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":345,\"completion_tokens\":11,\"total_tokens\":356},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - '/threads/{thread_id}/runs': + request: + curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/service_accounts \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Production App\"\n }'\n" + response: + content: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Production App\",\n \"role\": \"member\",\n \"created_at\": 1711471533,\n \"api_key\": {\n \"object\": \"organization.project.service_account.api_key\",\n \"value\": \"sk-abcdefghijklmnop123\",\n \"name\": \"Secret Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\"\n }\n}\n" + '/organization/projects/{project_id}/service_accounts/{service_account_id}': get: tags: - - Assistants - summary: Returns a list of runs belonging to a thread. - operationId: listRuns + - Projects + summary: Retrieves a service account in the project. + operationId: retrieve-project-service-account parameters: - - name: thread_id + - name: project_id in: path - description: The ID of the thread the run belongs to. + description: The ID of the project. required: true schema: type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. 
Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + - name: service_account_id + in: path + description: The ID of the service account. + required: true schema: type: string responses: '200': - description: OK + description: Project service account retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/ListRunsResponse' + $ref: '#/components/schemas/ProjectServiceAccount' x-oaiMeta: - name: List runs - group: threads - beta: true - returns: 'A list of [run](/docs/api-reference/runs/object) objects.' + name: Retrieve project service account + group: administration + returns: 'The [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object matching the specified ID.' 
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nruns = client.beta.threads.runs.list(\n \"thread_abc123\"\n)\n\nprint(runs)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const runs = await openai.beta.threads.runs.list(\n \"thread_abc123\"\n );\n\n console.log(runs);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n },\n {\n \"id\": \"run_abc456\",\n \"object\": \"thread.run\",\n \"created_at\": 1699063290,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699063290,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699063291,\n \"last_error\": 
null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n }\n ],\n \"first_id\": \"run_abc123\",\n \"last_id\": \"run_abc456\",\n \"has_more\": false\n}\n" - post: + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n}\n" + delete: tags: - - Assistants - summary: Create a run. - operationId: createRun + - Projects + summary: Deletes a service account from the project. + operationId: delete-project-service-account parameters: - - name: thread_id + - name: project_id in: path - description: The ID of the thread to run. + description: The ID of the project. required: true schema: type: string - - name: 'include[]' - in: query - description: "A list of additional fields to include in the response. 
Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" + - name: service_account_id + in: path + description: The ID of the service account. + required: true schema: - type: array - items: - enum: - - 'step_details.tool_calls[*].file_search.results[*].content' - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateRunRequest' - required: true + type: string responses: '200': - description: OK + description: Project service account deleted successfully. content: application/json: schema: - $ref: '#/components/schemas/RunObject' + $ref: '#/components/schemas/ProjectServiceAccountDeleteResponse' x-oaiMeta: - name: Create run - group: threads - beta: true - returns: 'A [run](/docs/api-reference/runs/object) object.' + name: Delete project service account + group: administration + returns: 'Confirmation of service account being deleted, or an error in case of an archived project, which has no service accounts' examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.create(\n thread_id=\"thread_abc123\",\n assistant_id=\"asst_abc123\"\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.create(\n \"thread_abc123\",\n { assistant_id: \"asst_abc123\" }\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n 
\"created_at\": 1699063290,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"queued\",\n \"started_at\": 1699063290,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699063291,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/threads/thread_123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_123\",\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.runs.create(\n thread_id=\"thread_123\",\n assistant_id=\"asst_123\",\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.runs.create(\n \"thread_123\",\n { assistant_id: \"asst_123\", stream: true }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.run.created\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710330641,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: 
{\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710330642,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! 
How can I assist you today?\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710330642,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710330641,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710330642,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - - title: Streaming with Functions - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n 
\"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\n\nstream = client.beta.threads.runs.create(\n thread_id=\"thread_abc123\",\n assistant_id=\"asst_abc123\",\n tools=tools,\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nconst tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n];\n\nasync function main() {\n const stream = await openai.beta.threads.runs.create(\n \"thread_abc123\",\n {\n assistant_id: \"asst_abc123\",\n tools: tools,\n stream: true\n }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710348075,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: 
{\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710348077,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! 
How can I assist you today?\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710348077,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710348075,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710348077,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - '/threads/{thread_id}/runs/{run_id}': + request: + curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.project.service_account.deleted\",\n \"id\": \"svc_acct_abc\",\n \"deleted\": true\n}\n" + '/organization/projects/{project_id}/users': get: tags: - - Assistants - summary: Retrieves a run. 
- operationId: getRun + - Projects + summary: Returns a list of users in the project. + operationId: list-project-users parameters: - - name: thread_id + - name: project_id in: path - description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' + description: The ID of the project. required: true schema: type: string - - name: run_id - in: path - description: The ID of the run to retrieve. - required: true + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + schema: + type: integer + default: 20 + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string responses: '200': - description: OK + description: Project users listed successfully. content: application/json: schema: - $ref: '#/components/schemas/RunObject' + $ref: '#/components/schemas/ProjectUserListResponse' + '400': + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: Retrieve run - group: threads - beta: true - returns: 'The [run](/docs/api-reference/runs/object) object matching the specified ID.' + name: List project users + group: administration + returns: 'A list of [ProjectUser](/docs/api-reference/project-users/object) objects.' 
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.retrieve(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.retrieve(\n \"thread_abc123\",\n \"run_abc123\"\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/users?after=user_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n ],\n \"first_id\": 
\"user-abc\",\n \"last_id\": \"user-xyz\",\n \"has_more\": false\n}\n" + error_response: + content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" post: tags: - - Assistants - summary: Modifies a run. - operationId: modifyRun + - Projects + summary: Adds a user to the project. Users must already be members of the organization to be added to a project. + operationId: create-project-user parameters: - - name: thread_id - in: path - description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' - required: true - schema: - type: string - - name: run_id + - name: project_id in: path - description: The ID of the run to modify. + description: The ID of the project. required: true schema: type: string requestBody: + description: The project user create request payload. content: application/json: schema: - $ref: '#/components/schemas/ModifyRunRequest' + $ref: '#/components/schemas/ProjectUserCreateRequest' required: true responses: '200': - description: OK + description: User added to project successfully. content: application/json: schema: - $ref: '#/components/schemas/RunObject' + $ref: '#/components/schemas/ProjectUser' + '400': + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: Modify run - group: threads - beta: true - returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' + name: Create project user + group: administration + returns: 'The created [ProjectUser](/docs/api-reference/project-users/object) object.' 
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"user_id\": \"user_abc123\"\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.update(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\",\n metadata={\"user_id\": \"user_abc123\"},\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.update(\n \"thread_abc123\",\n \"run_abc123\",\n {\n metadata: {\n user_id: \"user_abc123\",\n },\n }\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {\n \"user_id\": \"user_abc123\"\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - '/threads/{thread_id}/runs/{run_id}/submit_tool_outputs': - post: + curl: "curl -X POST 
https://api.openai.com/v1/organization/projects/proj_abc/users \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"user_id\": \"user_abc\",\n \"role\": \"member\"\n }'\n" + response: + content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + error_response: + content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" + '/organization/projects/{project_id}/users/{user_id}': + get: tags: - - Assistants - summary: "When a run has the `status: \"requires_action\"` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once they're all completed. All outputs must be submitted in a single request.\n" - operationId: submitToolOuputsToRun + - Projects + summary: Retrieves a user in the project. + operationId: retrieve-project-user parameters: - - name: thread_id + - name: project_id in: path - description: 'The ID of the [thread](/docs/api-reference/threads) to which this run belongs.' + description: The ID of the project. required: true schema: type: string - - name: run_id + - name: user_id in: path - description: The ID of the run that requires the tool output submission. + description: The ID of the user. required: true schema: type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/SubmitToolOutputsRunRequest' - required: true responses: '200': - description: OK + description: Project user retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/RunObject' + $ref: '#/components/schemas/ProjectUser' x-oaiMeta: - name: Submit tool outputs to run - group: threads - beta: true - returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' 
+ name: Retrieve project user + group: administration + returns: 'The [ProjectUser](/docs/api-reference/project-users/object) object matching the specified ID.' examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"tool_outputs\": [\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.submit_tool_outputs(\n thread_id=\"thread_123\",\n run_id=\"run_123\",\n tool_outputs=[\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ]\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.submitToolOutputs(\n \"thread_123\",\n \"run_123\",\n {\n tool_outputs: [\n {\n tool_call_id: \"call_001\",\n output: \"70 degrees and sunny.\",\n },\n ],\n }\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075592,\n \"assistant_id\": \"asst_123\",\n \"thread_id\": \"thread_123\",\n \"status\": \"queued\",\n \"started_at\": 1699075592,\n \"expires_at\": 1699076192,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"tool_outputs\": [\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.runs.submit_tool_outputs(\n thread_id=\"thread_123\",\n run_id=\"run_123\",\n tool_outputs=[\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ],\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.runs.submitToolOutputs(\n \"thread_123\",\n \"run_123\",\n {\n tool_outputs: [\n {\n tool_call_id: \"call_001\",\n output: \"70 degrees and sunny.\",\n },\n ],\n }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.run.step.completed\ndata: 
{\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710352449,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710352475,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"id\":\"call_iWr0kQ2EaYMaxNdl0v3KYkx7\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"{\\\"location\\\":\\\"San Francisco, CA\\\",\\\"unit\\\":\\\"fahrenheit\\\"}\",\"output\":\"70 degrees and sunny.\"}}]},\"usage\":{\"prompt_tokens\":291,\"completion_tokens\":24,\"total_tokens\":315}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":1710352448,\"expires_at\":1710353047,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710352475,\"expires_at\":1710353047,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: 
{\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"The\",\"annotations\":[]}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" current\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" weather\"}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" sunny\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\".\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710352477,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"The current weather in San Francisco, CA is 70 degrees Fahrenheit and sunny.\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: 
{\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710352477,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":{\"prompt_tokens\":329,\"completion_tokens\":18,\"total_tokens\":347}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710352475,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710352477,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - '/threads/{thread_id}/runs/{run_id}/cancel': + request: + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" post: tags: - - Assistants - summary: Cancels a run that is `in_progress`. - operationId: cancelRun + - Projects + summary: Modifies a user's role in the project. + operationId: modify-project-user parameters: - - name: thread_id + - name: project_id in: path - description: The ID of the thread to which this run belongs. + description: The ID of the project. required: true schema: type: string - - name: run_id + - name: user_id in: path - description: The ID of the run to cancel. + description: The ID of the user. required: true schema: type: string + requestBody: + description: The project user update request payload. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserUpdateRequest' + required: true responses: '200': - description: OK + description: Project user's role updated successfully. content: application/json: schema: - $ref: '#/components/schemas/RunObject' + $ref: '#/components/schemas/ProjectUser' + '400': + description: Error response for various conditions. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: Cancel a run - group: threads - beta: true - returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' + name: Modify project user + group: administration + returns: 'The updated [ProjectUser](/docs/api-reference/project-users/object) object.' examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X POST\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.cancel(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.cancel(\n \"thread_abc123\",\n \"run_abc123\"\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699076126,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"cancelling\",\n \"started_at\": 1699076126,\n \"expires_at\": 1699076726,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You summarize books.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": [\"vs_123\"]\n }\n },\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - '/threads/{thread_id}/runs/{run_id}/steps': - get: + curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: 
application/json\" \\\n -d '{\n \"role\": \"owner\"\n }'\n" + response: + content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + delete: tags: - - Assistants - summary: Returns a list of run steps belonging to a run. - operationId: listRunSteps + - Projects + summary: Deletes a user from the project. + operationId: delete-project-user parameters: - - name: thread_id + - name: project_id in: path - description: The ID of the thread the run and run steps belong to. + description: The ID of the project. required: true schema: type: string - - name: run_id + - name: user_id in: path - description: The ID of the run the run steps belong to. + description: The ID of the user. required: true schema: type: string + responses: + '200': + description: Project user deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserDeleteResponse' + '400': + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Delete project user + group: administration + returns: 'Confirmation that project has been deleted or an error in case of an archived project, which has no users' + examples: + request: + curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.project.user.deleted\",\n \"id\": \"user_abc\",\n \"deleted\": true\n}\n" + /organization/users: + get: + tags: + - Users + summary: Lists all of the users in the organization. + operationId: list-users + parameters: - name: limit in: query description: "A limit on the number of objects to be returned. 
Limit can range between 1 and 100, and the default is 20.\n" schema: type: integer default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - name: after in: query description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - - name: 'include[]' - in: query - description: "A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" - schema: - type: array - items: - enum: - - 'step_details.tool_calls[*].file_search.results[*].content' - type: string responses: '200': - description: OK + description: Users listed successfully. content: application/json: schema: - $ref: '#/components/schemas/ListRunStepsResponse' + $ref: '#/components/schemas/UserListResponse' x-oaiMeta: - name: List run steps - group: threads - beta: true - returns: 'A list of [run step](/docs/api-reference/run-steps/step-object) objects.' + name: List users + group: administration + returns: 'A list of [User](/docs/api-reference/users/object) objects.' 
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun_steps = client.beta.threads.runs.steps.list(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run_steps)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const runStep = await openai.beta.threads.runs.steps.list(\n \"thread_abc123\",\n \"run_abc123\"\n );\n console.log(runStep);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n }\n ],\n \"first_id\": \"step_abc123\",\n \"last_id\": \"step_abc456\",\n \"has_more\": false\n}\n" - '/threads/{thread_id}/runs/{run_id}/steps/{step_id}': + curl: "curl https://api.openai.com/v1/organization/users?after=user_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n ],\n \"first_id\": \"user-abc\",\n \"last_id\": \"user-xyz\",\n 
\"has_more\": false\n}\n" + '/organization/users/{user_id}': get: tags: - - Assistants - summary: Retrieves a run step. - operationId: getRunStep + - Users + summary: Retrieves a user by their identifier. + operationId: retrieve-user parameters: - - name: thread_id - in: path - description: The ID of the thread to which the run and run step belongs. - required: true - schema: - type: string - - name: run_id - in: path - description: The ID of the run to which the run step belongs. - required: true - schema: - type: string - - name: step_id + - name: user_id in: path - description: The ID of the run step to retrieve. + description: The ID of the user. required: true schema: type: string - - name: 'include[]' - in: query - description: "A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" - schema: - type: array - items: - enum: - - 'step_details.tool_calls[*].file_search.results[*].content' - type: string responses: '200': - description: OK + description: User retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/RunStepObject' + $ref: '#/components/schemas/User' x-oaiMeta: - name: Retrieve run step - group: threads - beta: true - returns: 'The [run step](/docs/api-reference/run-steps/step-object) object matching the specified ID.' + name: Retrieve user + group: administration + returns: 'The [User](/docs/api-reference/users/object) object matching the specified ID.' 
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps/step_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun_step = client.beta.threads.runs.steps.retrieve(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\",\n step_id=\"step_abc123\"\n)\n\nprint(run_step)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const runStep = await openai.beta.threads.runs.steps.retrieve(\n \"thread_abc123\",\n \"run_abc123\",\n \"step_abc123\"\n );\n console.log(runStep);\n}\n\nmain();\n" - response: "{\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n}\n" - /vector_stores: - get: + curl: "curl https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + post: tags: - - Vector Stores - summary: Returns a list of vector stores. - operationId: listVectorStores + - Users + summary: Modifies a user's role in the organization. 
+ operationId: modify-user parameters: - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + - name: user_id + in: path + description: The ID of the user. + required: true schema: type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + requestBody: + description: The new user role to modify. This must be one of `owner` or `member`. + content: + application/json: + schema: + $ref: '#/components/schemas/UserRoleUpdateRequest' + required: true + responses: + '200': + description: User role updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/User' + x-oaiMeta: + name: Modify user + group: administration + returns: 'The updated [User](/docs/api-reference/users/object) object.' 
+ examples: + request: + curl: "curl -X POST https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"role\": \"owner\"\n }'\n" + response: + content: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + delete: + tags: + - Users + summary: Deletes a user from the organization. + operationId: delete-user + parameters: + - name: user_id + in: path + description: The ID of the user. + required: true schema: type: string + responses: + '200': + description: User deleted successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/UserDeleteResponse' + x-oaiMeta: + name: Delete user + group: administration + returns: Confirmation of the deleted user + examples: + request: + curl: "curl -X DELETE https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.user.deleted\",\n \"id\": \"user_abc\",\n \"deleted\": true\n}\n" + /threads: + post: + tags: + - Assistants + summary: Create a thread. + operationId: createThread + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateThreadRequest' responses: '200': description: OK content: application/json: schema: - $ref: '#/components/schemas/ListVectorStoresResponse' + $ref: '#/components/schemas/ThreadObject' x-oaiMeta: - name: List vector stores - group: vector_stores + name: Create thread + group: threads beta: true - returns: 'A list of [vector store](/docs/api-reference/vector-stores/object) objects.' + returns: 'A [thread](/docs/api-reference/threads) object.' 
examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_stores = client.beta.vector_stores.list()\nprint(vector_stores)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStores = await openai.beta.vectorStores.list();\n console.log(vectorStores);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n },\n {\n \"id\": \"vs_abc456\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ v2\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n }\n ],\n \"first_id\": \"vs_abc123\",\n \"last_id\": \"vs_abc456\",\n \"has_more\": false\n}\n" + - title: Empty + request: + curl: "curl https://api.openai.com/v1/threads \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d ''\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nempty_thread = client.beta.threads.create()\nprint(empty_thread)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const emptyThread = await openai.beta.threads.create();\n\n console.log(emptyThread);\n}\n\nmain();" + response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699012949,\n \"metadata\": {},\n \"tool_resources\": {}\n}\n" + - title: Messages + request: + curl: "curl 
https://api.openai.com/v1/threads \\\n-H \"Content-Type: application/json\" \\\n-H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n-H \"OpenAI-Beta: assistants=v2\" \\\n-d '{\n \"messages\": [{\n \"role\": \"user\",\n \"content\": \"Hello, what is AI?\"\n }, {\n \"role\": \"user\",\n \"content\": \"How does AI work? Explain it in simple terms.\"\n }]\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage_thread = client.beta.threads.create(\n messages=[\n {\n \"role\": \"user\",\n \"content\": \"Hello, what is AI?\"\n },\n {\n \"role\": \"user\",\n \"content\": \"How does AI work? Explain it in simple terms.\"\n },\n ]\n)\n\nprint(message_thread)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const messageThread = await openai.beta.threads.create({\n messages: [\n {\n role: \"user\",\n content: \"Hello, what is AI?\"\n },\n {\n role: \"user\",\n content: \"How does AI work? Explain it in simple terms.\",\n },\n ],\n });\n\n console.log(messageThread);\n}\n\nmain();" + response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {},\n \"tool_resources\": {}\n}\n" + /threads/runs: post: tags: - - Vector Stores - summary: Create a vector store. - operationId: createVectorStore + - Assistants + summary: Create a thread and run it in one request. + operationId: createThreadAndRun requestBody: content: application/json: schema: - $ref: '#/components/schemas/CreateVectorStoreRequest' + $ref: '#/components/schemas/CreateThreadAndRunRequest' required: true responses: '200': @@ -1987,28 +2196,41 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/VectorStoreObject' + $ref: '#/components/schemas/RunObject' x-oaiMeta: - name: Create vector store - group: vector_stores + name: Create thread and run + group: threads beta: true - returns: 'A [vector store](/docs/api-reference/vector-stores/object) object.' 
+ returns: 'A [run](/docs/api-reference/runs/object) object.' examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n -d '{\n \"name\": \"Support FAQ\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.create(\n name=\"Support FAQ\"\n)\nprint(vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.create({\n name: \"Support FAQ\"\n });\n console.log(vectorStore);\n}\n\nmain();\n" - response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n}\n" - '/vector_stores/{vector_store_id}': + - title: Default + request: + curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Explain deep learning to a 5 year old.\"}\n ]\n }\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.create_and_run(\n assistant_id=\"asst_abc123\",\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Explain deep learning to a 5 year old.\"}\n ]\n }\n)\n\nprint(run)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_abc123\",\n thread: {\n messages: [\n { role: \"user\", content: \"Explain deep learning to a 5 year old.\" },\n ],\n },\n });\n\n 
console.log(run);\n}\n\nmain();\n" + response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699076792,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"queued\",\n \"started_at\": null,\n \"expires_at\": 1699077392,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"required_action\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant.\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_completion_tokens\": null,\n \"max_prompt_tokens\": null,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"incomplete_details\": null,\n \"usage\": null,\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" + - title: Streaming + request: + curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello\"}\n ]\n },\n \"stream\": true\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.create_and_run(\n assistant_id=\"asst_123\",\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello\"}\n ]\n },\n stream=True\n)\n\nfor event in stream:\n print(event)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_123\",\n thread: {\n messages: [\n { role: \"user\", content: \"Hello\" },\n ],\n },\n stream: true\n });\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" + response: "event: thread.created\ndata: 
{\"id\":\"thread_123\",\"object\":\"thread\",\"created_at\":1710348075,\"metadata\":{}}\n\nevent: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.in_progress\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[], 
\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[], \"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710348077,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! 
How can I assist you today?\",\"annotations\":[]}}], \"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710348077,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\n{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1713226836,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1713226837,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":345,\"completion_tokens\":11,\"total_tokens\":356},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: done\ndata: [DONE]\n" + - title: Streaming with Functions + request: + curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"What is the weather like in San Francisco?\"}\n ]\n },\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given 
location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"stream\": true\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\n\nstream = client.beta.threads.create_and_run(\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"What is the weather like in San Francisco?\"}\n ]\n },\n assistant_id=\"asst_abc123\",\n tools=tools,\n stream=True\n)\n\nfor event in stream:\n print(event)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nconst tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n];\n\nasync function main() {\n const stream = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_123\",\n thread: {\n messages: [\n { role: \"user\", content: \"What is the weather like in San Francisco?\" },\n ],\n },\n tools: tools,\n stream: true\n });\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" + response: "event: thread.created\ndata: {\"id\":\"thread_123\",\"object\":\"thread\",\"created_at\":1710351818,\"metadata\":{}}\n\nevent: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710351818,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710351819,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710352418,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[]},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710351819,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710352418,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[]},\"usage\":null}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"id\":\"call_XXNp8YGaFrjrSjgqxtC8JJ1B\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"\",\"output\":null}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"{\\\"\"}}]}}}\n\nevent: thread.run.step.delta\ndata: 
{\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"location\"}}]}}}\n\n...\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"ahrenheit\"}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"\\\"}\"}}]}}}\n\nevent: thread.run.requires_action\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"requires_action\",\"started_at\":1710351818,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":{\"type\":\"submit_tool_outputs\",\"submit_tool_outputs\":{\"tool_calls\":[{\"id\":\"call_XXNp8YGaFrjrSjgqxtC8JJ1B\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"{\\\"location\\\":\\\"San Francisco, CA\\\",\\\"unit\\\":\\\"fahrenheit\\\"}\"}}]}},\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":345,\"completion_tokens\":11,\"total_tokens\":356},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" + '/threads/{thread_id}': get: tags: - - Vector Stores - summary: Retrieves a vector store. - operationId: getVectorStore + - Assistants + summary: Retrieves a thread. + operationId: getThread parameters: - - name: vector_store_id + - name: thread_id in: path - description: The ID of the vector store to retrieve. + description: The ID of the thread to retrieve. required: true schema: type: string @@ -2018,27 +2240,27 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/VectorStoreObject' + $ref: '#/components/schemas/ThreadObject' x-oaiMeta: - name: Retrieve vector store - group: vector_stores + name: Retrieve thread + group: threads beta: true - returns: 'The [vector store](/docs/api-reference/vector-stores/object) object matching the specified ID.' + returns: 'The [thread](/docs/api-reference/threads/object) object matching the specified ID.' 
examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.retrieve(\n vector_store_id=\"vs_abc123\"\n)\nprint(vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.retrieve(\n \"vs_abc123\"\n );\n console.log(vectorStore);\n}\n\nmain();\n" - response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776\n}\n" + curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_thread = client.beta.threads.retrieve(\"thread_abc123\")\nprint(my_thread)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myThread = await openai.beta.threads.retrieve(\n \"thread_abc123\"\n );\n\n console.log(myThread);\n}\n\nmain();" + response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {},\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": []\n }\n }\n}\n" post: tags: - - Vector Stores - summary: Modifies a vector store. - operationId: modifyVectorStore + - Assistants + summary: Modifies a thread. + operationId: modifyThread parameters: - - name: vector_store_id + - name: thread_id in: path - description: The ID of the vector store to modify. + description: The ID of the thread to modify. Only the `metadata` can be modified. 
required: true schema: type: string @@ -2046,7 +2268,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/UpdateVectorStoreRequest' + $ref: '#/components/schemas/ModifyThreadRequest' required: true responses: '200': @@ -2054,27 +2276,27 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/VectorStoreObject' + $ref: '#/components/schemas/ThreadObject' x-oaiMeta: - name: Modify vector store - group: vector_stores + name: Modify thread + group: threads beta: true - returns: 'The modified [vector store](/docs/api-reference/vector-stores/object) object.' + returns: 'The modified [thread](/docs/api-reference/threads/object) object matching the specified ID.' examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n -d '{\n \"name\": \"Support FAQ\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.update(\n vector_store_id=\"vs_abc123\",\n name=\"Support FAQ\"\n)\nprint(vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.update(\n \"vs_abc123\",\n {\n name: \"Support FAQ\"\n }\n );\n console.log(vectorStore);\n}\n\nmain();\n" - response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n}\n" + curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n }'\n" + python: "from 
openai import OpenAI\nclient = OpenAI()\n\nmy_updated_thread = client.beta.threads.update(\n \"thread_abc123\",\n metadata={\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n)\nprint(my_updated_thread)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const updatedThread = await openai.beta.threads.update(\n \"thread_abc123\",\n {\n metadata: { modified: \"true\", user: \"abc123\" },\n }\n );\n\n console.log(updatedThread);\n}\n\nmain();" + response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n },\n \"tool_resources\": {}\n}\n" delete: tags: - - Vector Stores - summary: Delete a vector store. - operationId: deleteVectorStore + - Assistants + summary: Delete a thread. + operationId: deleteThread parameters: - - name: vector_store_id + - name: thread_id in: path - description: The ID of the vector store to delete. + description: The ID of the thread to delete. 
required: true schema: type: string @@ -2084,28 +2306,28 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/DeleteVectorStoreResponse' + $ref: '#/components/schemas/DeleteThreadResponse' x-oaiMeta: - name: Delete vector store - group: vector_stores + name: Delete thread + group: threads beta: true returns: Deletion status examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store = client.beta.vector_stores.delete(\n vector_store_id=\"vs_abc123\"\n)\nprint(deleted_vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStore = await openai.beta.vectorStores.del(\n \"vs_abc123\"\n );\n console.log(deletedVectorStore);\n}\n\nmain();\n" - response: "{\n id: \"vs_abc123\",\n object: \"vector_store.deleted\",\n deleted: true\n}\n" - '/vector_stores/{vector_store_id}/files': + curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.beta.threads.delete(\"thread_abc123\")\nprint(response)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.beta.threads.del(\"thread_abc123\");\n\n console.log(response);\n}\nmain();" + response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread.deleted\",\n \"deleted\": true\n}\n" + '/threads/{thread_id}/messages': get: tags: - - Vector Stores - summary: Returns a list of vector store files. 
- operationId: listVectorStoreFiles - parameters: - - name: vector_store_id + - Assistants + summary: Returns a list of messages for a given thread. + operationId: listMessages + parameters: + - name: thread_id in: path - description: The ID of the vector store that the files belong to. + description: 'The ID of the [thread](/docs/api-reference/threads) the messages belong to.' required: true schema: type: string @@ -2134,15 +2356,10 @@ paths: description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: type: string - - name: filter + - name: run_id in: query - description: 'Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.' + description: "Filter messages by the run ID that generated them.\n" schema: - enum: - - in_progress - - completed - - failed - - cancelled type: string responses: '200': @@ -2150,36 +2367,35 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/ListVectorStoreFilesResponse' + $ref: '#/components/schemas/ListMessagesResponse' x-oaiMeta: - name: List vector store files - group: vector_stores + name: List messages + group: threads beta: true - returns: 'A list of [vector store file](/docs/api-reference/vector-stores-files/file-object) objects.' + returns: 'A list of [message](/docs/api-reference/messages) objects.' 
examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_files = client.beta.vector_stores.files.list(\n vector_store_id=\"vs_abc123\"\n)\nprint(vector_store_files)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFiles = await openai.beta.vectorStores.files.list(\n \"vs_abc123\"\n );\n console.log(vectorStoreFiles);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n },\n {\n \"id\": \"file-abc456\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n }\n ],\n \"first_id\": \"file-abc123\",\n \"last_id\": \"file-abc456\",\n \"has_more\": false\n}\n" + curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nthread_messages = client.beta.threads.messages.list(\"thread_abc123\")\nprint(thread_messages.data)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const threadMessages = await openai.beta.threads.messages.list(\n \"thread_abc123\"\n );\n\n console.log(threadMessages.data);\n}\n\nmain();" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699016383,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": 
{\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n },\n {\n \"id\": \"msg_abc456\",\n \"object\": \"thread.message\",\n \"created_at\": 1699016383,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"Hello, what is AI?\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n }\n ],\n \"first_id\": \"msg_abc123\",\n \"last_id\": \"msg_abc456\",\n \"has_more\": false\n}\n" post: tags: - - Vector Stores - summary: 'Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).' - operationId: createVectorStoreFile + - Assistants + summary: Create a message. + operationId: createMessage parameters: - - name: vector_store_id + - name: thread_id in: path - description: "The ID of the vector store for which to create a File.\n" + description: 'The ID of the [thread](/docs/api-reference/threads) to create a message for.' required: true schema: type: string - example: vs_abc123 requestBody: content: application/json: schema: - $ref: '#/components/schemas/CreateVectorStoreFileRequest' + $ref: '#/components/schemas/CreateMessageRequest' required: true responses: '200': @@ -2187,112 +2403,78 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/VectorStoreFileObject' + $ref: '#/components/schemas/MessageObject' x-oaiMeta: - name: Create vector store file - group: vector_stores + name: Create message + group: threads beta: true - returns: 'A [vector store file](/docs/api-reference/vector-stores-files/file-object) object.' + returns: 'A [message](/docs/api-reference/messages/object) object.' 
examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"file_id\": \"file-abc123\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file = client.beta.vector_stores.files.create(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(vector_store_file)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const myVectorStoreFile = await openai.beta.vectorStores.files.create(\n \"vs_abc123\",\n {\n file_id: \"file-abc123\"\n }\n );\n console.log(myVectorStoreFile);\n}\n\nmain();\n" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"usage_bytes\": 1234,\n \"vector_store_id\": \"vs_abcd\",\n \"status\": \"completed\",\n \"last_error\": null\n}\n" - '/vector_stores/{vector_store_id}/files/{file_id}': + curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"role\": \"user\",\n \"content\": \"How does AI work? Explain it in simple terms.\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nthread_message = client.beta.threads.messages.create(\n \"thread_abc123\",\n role=\"user\",\n content=\"How does AI work? Explain it in simple terms.\",\n)\nprint(thread_message)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const threadMessages = await openai.beta.threads.messages.create(\n \"thread_abc123\",\n { role: \"user\", content: \"How does AI work? 
Explain it in simple terms.\" }\n );\n\n console.log(threadMessages);\n}\n\nmain();" + response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1713226573,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n}\n" + '/threads/{thread_id}/messages/{message_id}': get: tags: - - Vector Stores - summary: Retrieves a vector store file. - operationId: getVectorStoreFile + - Assistants + summary: Retrieve a message. + operationId: getMessage parameters: - - name: vector_store_id + - name: thread_id in: path - description: The ID of the vector store that the file belongs to. + description: 'The ID of the [thread](/docs/api-reference/threads) to which this message belongs.' required: true schema: type: string - example: vs_abc123 - - name: file_id + - name: message_id in: path - description: The ID of the file being retrieved. + description: The ID of the message to retrieve. required: true schema: type: string - example: file-abc123 responses: '200': description: OK content: application/json: schema: - $ref: '#/components/schemas/VectorStoreFileObject' + $ref: '#/components/schemas/MessageObject' x-oaiMeta: - name: Retrieve vector store file - group: vector_stores + name: Retrieve message + group: threads beta: true - returns: 'The [vector store file](/docs/api-reference/vector-stores-files/file-object) object.' + returns: 'The [message](/docs/api-reference/messages/object) object matching the specified ID.' 
examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file = client.beta.vector_stores.files.retrieve(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(vector_store_file)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFile = await openai.beta.vectorStores.files.retrieve(\n \"vs_abc123\",\n \"file-abc123\"\n );\n console.log(vectorStoreFile);\n}\n\nmain();\n" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abcd\",\n \"status\": \"completed\",\n \"last_error\": null\n}\n" - delete: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage = client.beta.threads.messages.retrieve(\n message_id=\"msg_abc123\",\n thread_id=\"thread_abc123\",\n)\nprint(message)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const message = await openai.beta.threads.messages.retrieve(\n \"thread_abc123\",\n \"msg_abc123\"\n );\n\n console.log(message);\n}\n\nmain();" + response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699017614,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? 
Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n}\n" + post: tags: - - Vector Stores - summary: 'Delete a vector store file. This will remove the file from the vector store but the file itself will not be deleted. To delete the file, use the [delete file](/docs/api-reference/files/delete) endpoint.' - operationId: deleteVectorStoreFile + - Assistants + summary: Modifies a message. + operationId: modifyMessage parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the file belongs to. - required: true - schema: - type: string - - name: file_id + - name: thread_id in: path - description: The ID of the file to delete. + description: The ID of the thread to which this message belongs. required: true schema: type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteVectorStoreFileResponse' - x-oaiMeta: - name: Delete vector store file - group: vector_stores - beta: true - returns: Deletion status - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store_file = client.beta.vector_stores.files.delete(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(deleted_vector_store_file)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStoreFile = await openai.beta.vectorStores.files.del(\n \"vs_abc123\",\n \"file-abc123\"\n );\n console.log(deletedVectorStoreFile);\n}\n\nmain();\n" - response: "{\n id: \"file-abc123\",\n object: \"vector_store.file.deleted\",\n deleted: true\n}\n" - '/vector_stores/{vector_store_id}/file_batches': - post: - tags: 
- - Vector Stores - summary: Create a vector store file batch. - operationId: createVectorStoreFileBatch - parameters: - - name: vector_store_id + - name: message_id in: path - description: "The ID of the vector store for which to create a File Batch.\n" + description: The ID of the message to modify. required: true schema: type: string - example: vs_abc123 requestBody: content: application/json: schema: - $ref: '#/components/schemas/CreateVectorStoreFileBatchRequest' + $ref: '#/components/schemas/ModifyMessageRequest' required: true responses: '200': @@ -2300,73 +2482,33 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/VectorStoreFileBatchObject' - x-oaiMeta: - name: Create vector store file batch - group: vector_stores - beta: true - returns: 'A [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"file_ids\": [\"file-abc123\", \"file-abc456\"]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file_batch = client.beta.vector_stores.file_batches.create(\n vector_store_id=\"vs_abc123\",\n file_ids=[\"file-abc123\", \"file-abc456\"]\n)\nprint(vector_store_file_batch)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const myVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.create(\n \"vs_abc123\",\n {\n file_ids: [\"file-abc123\", \"file-abc456\"]\n }\n );\n console.log(myVectorStoreFileBatch);\n}\n\nmain();\n" - response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"in_progress\",\n \"file_counts\": {\n \"in_progress\": 1,\n \"completed\": 1,\n 
\"failed\": 0,\n \"cancelled\": 0,\n \"total\": 0,\n }\n}\n" - '/vector_stores/{vector_store_id}/file_batches/{batch_id}': - get: - tags: - - Vector Stores - summary: Retrieves a vector store file batch. - operationId: getVectorStoreFileBatch - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the file batch belongs to. - required: true - schema: - type: string - example: vs_abc123 - - name: batch_id - in: path - description: The ID of the file batch being retrieved. - required: true - schema: - type: string - example: vsfb_abc123 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileBatchObject' + $ref: '#/components/schemas/MessageObject' x-oaiMeta: - name: Retrieve vector store file batch - group: vector_stores + name: Modify message + group: threads beta: true - returns: 'The [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.' + returns: 'The modified [message](/docs/api-reference/messages/object) object.' 
examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files_batches/vsfb_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file_batch = client.beta.vector_stores.file_batches.retrieve(\n vector_store_id=\"vs_abc123\",\n batch_id=\"vsfb_abc123\"\n)\nprint(vector_store_file_batch)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.retrieve(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(vectorStoreFileBatch);\n}\n\nmain();\n" - response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"in_progress\",\n \"file_counts\": {\n \"in_progress\": 1,\n \"completed\": 1,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 0,\n }\n}\n" - '/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel': - post: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage = client.beta.threads.messages.update(\n message_id=\"msg_abc123\",\n thread_id=\"thread_abc123\",\n metadata={\n \"modified\": \"true\",\n \"user\": \"abc123\",\n },\n)\nprint(message)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const message = await openai.beta.threads.messages.update(\n \"thread_abc123\",\n \"msg_abc123\",\n {\n metadata: {\n modified: \"true\",\n user: \"abc123\",\n },\n }\n );\n\n console.log(message);\n}\n\nmain();" + response: "{\n 
\"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699017614,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"file_ids\": [],\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n}\n" + delete: tags: - - Vector Stores - summary: Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible. - operationId: cancelVectorStoreFileBatch + - Assistants + summary: Deletes a message. + operationId: deleteMessage parameters: - - name: vector_store_id + - name: thread_id in: path - description: The ID of the vector store that the file batch belongs to. + description: The ID of the thread to which this message belongs. required: true schema: type: string - - name: batch_id + - name: message_id in: path - description: The ID of the file batch to cancel. + description: The ID of the message to delete. required: true schema: type: string @@ -2376,34 +2518,28 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/VectorStoreFileBatchObject' + $ref: '#/components/schemas/DeleteMessageResponse' x-oaiMeta: - name: Cancel vector store file batch - group: vector_stores + name: Delete message + group: threads beta: true - returns: The modified vector store file batch object. 
+ returns: Deletion status examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files_batches/vsfb_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X POST\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store_file_batch = client.beta.vector_stores.file_batches.cancel(\n vector_store_id=\"vs_abc123\",\n file_batch_id=\"vsfb_abc123\"\n)\nprint(deleted_vector_store_file_batch)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStoreFileBatch = await openai.vector_stores.fileBatches.cancel(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(deletedVectorStoreFileBatch);\n}\n\nmain();\n" - response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"cancelling\",\n \"file_counts\": {\n \"in_progress\": 12,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 15,\n }\n}\n" - '/vector_stores/{vector_store_id}/file_batches/{batch_id}/files': + curl: "curl -X DELETE https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_message = client.beta.threads.messages.delete(\n message_id=\"msg_abc123\",\n thread_id=\"thread_abc123\",\n)\nprint(deleted_message)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedMessage = await openai.beta.threads.messages.del(\n \"thread_abc123\",\n \"msg_abc123\"\n );\n\n console.log(deletedMessage);\n}\n\nmain();" + response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message.deleted\",\n \"deleted\": true\n}\n" + 
'/threads/{thread_id}/runs': get: tags: - - Vector Stores - summary: Returns a list of vector store files in a batch. - operationId: listFilesInVectorStoreBatch + - Assistants + summary: Returns a list of runs belonging to a thread. + operationId: listRuns parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the files belong to. - required: true - schema: - type: string - - name: batch_id + - name: thread_id in: path - description: The ID of the file batch that the files belong to. + description: The ID of the thread the run belongs to. required: true schema: type: string @@ -2432,518 +2568,503 @@ paths: description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: type: string - - name: filter - in: query - description: 'Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.' - schema: - enum: - - in_progress - - completed - - failed - - cancelled - type: string responses: '200': description: OK content: application/json: schema: - $ref: '#/components/schemas/ListVectorStoreFilesResponse' + $ref: '#/components/schemas/ListRunsResponse' x-oaiMeta: - name: List vector store files in a batch - group: vector_stores + name: List runs + group: threads beta: true - returns: 'A list of [vector store file](/docs/api-reference/vector-stores-files/file-object) objects.' + returns: 'A list of [run](/docs/api-reference/runs/object) objects.' 
examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files_batches/vsfb_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_files = client.beta.vector_stores.file_batches.list_files(\n vector_store_id=\"vs_abc123\",\n batch_id=\"vsfb_abc123\"\n)\nprint(vector_store_files)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFiles = await openai.beta.vectorStores.fileBatches.listFiles(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(vectorStoreFiles);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n },\n {\n \"id\": \"file-abc456\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n }\n ],\n \"first_id\": \"file-abc123\",\n \"last_id\": \"file-abc456\",\n \"has_more\": false\n}\n" - /batches: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nruns = client.beta.threads.runs.list(\n \"thread_abc123\"\n)\n\nprint(runs)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const runs = await openai.beta.threads.runs.list(\n \"thread_abc123\"\n );\n\n console.log(runs);\n}\n\nmain();\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n 
\"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n },\n {\n \"id\": \"run_abc456\",\n \"object\": \"thread.run\",\n \"created_at\": 1699063290,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699063290,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699063291,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n }\n ],\n \"first_id\": \"run_abc123\",\n \"last_id\": \"run_abc456\",\n \"has_more\": false\n}\n" post: tags: - - Batch - summary: Creates and executes 
a batch from an uploaded file of requests - operationId: createBatch + - Assistants + summary: Create a run. + operationId: createRun + parameters: + - name: thread_id + in: path + description: The ID of the thread to run. + required: true + schema: + type: string + - name: 'include[]' + in: query + description: "A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" + schema: + type: array + items: + enum: + - 'step_details.tool_calls[*].file_search.results[*].content' + type: string requestBody: content: application/json: schema: - required: - - input_file_id - - endpoint - - completion_window - type: object - properties: - input_file_id: - type: string - description: "The ID of an uploaded file that contains requests for the new batch.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size.\n" - endpoint: - enum: - - /v1/chat/completions - - /v1/embeddings - - /v1/completions - type: string - description: 'The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch.' - completion_window: - enum: - - 24h - type: string - description: The time frame within which the batch should be processed. Currently only `24h` is supported. 
- metadata: - type: object - additionalProperties: - type: string - description: Optional custom metadata for the batch. - nullable: true + $ref: '#/components/schemas/CreateRunRequest' required: true responses: '200': - description: Batch created successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/Batch' + $ref: '#/components/schemas/RunObject' x-oaiMeta: - name: Create batch - group: batch - returns: 'The created [Batch](/docs/api-reference/batch/object) object.' + name: Create run + group: threads + beta: true + returns: 'A [run](/docs/api-reference/runs/object) object.' examples: - request: - curl: "curl https://api.openai.com/v1/batches \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"input_file_id\": \"file-abc123\",\n \"endpoint\": \"/v1/chat/completions\",\n \"completion_window\": \"24h\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.create(\n input_file_id=\"file-abc123\",\n endpoint=\"/v1/chat/completions\",\n completion_window=\"24h\"\n)\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.create({\n input_file_id: \"file-abc123\",\n endpoint: \"/v1/chat/completions\",\n completion_window: \"24h\"\n });\n\n console.log(batch);\n}\n\nmain();\n" - response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"validating\",\n \"output_file_id\": null,\n \"error_file_id\": null,\n \"created_at\": 1711471533,\n \"in_progress_at\": null,\n \"expires_at\": null,\n \"finalizing_at\": null,\n \"completed_at\": null,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 0,\n \"completed\": 0,\n \"failed\": 0\n 
},\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" + - title: Default + request: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.create(\n thread_id=\"thread_abc123\",\n assistant_id=\"asst_abc123\"\n)\n\nprint(run)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.create(\n \"thread_abc123\",\n { assistant_id: \"asst_abc123\" }\n );\n\n console.log(run);\n}\n\nmain();\n" + response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699063290,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"queued\",\n \"started_at\": 1699063290,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699063291,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" + - title: Streaming + request: + curl: "curl https://api.openai.com/v1/threads/thread_123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_123\",\n \"stream\": true\n }'\n" + python: "from openai import 
OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.runs.create(\n thread_id=\"thread_123\",\n assistant_id=\"asst_123\",\n stream=True\n)\n\nfor event in stream:\n print(event)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.runs.create(\n \"thread_123\",\n { assistant_id: \"asst_123\", stream: true }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" + response: "event: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710330641,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: 
{\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710330642,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! 
How can I assist you today?\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710330642,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710330641,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710330642,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" + - title: Streaming with Functions + request: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n 
\"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"stream\": true\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\n\nstream = client.beta.threads.runs.create(\n thread_id=\"thread_abc123\",\n assistant_id=\"asst_abc123\",\n tools=tools,\n stream=True\n)\n\nfor event in stream:\n print(event)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nconst tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n];\n\nasync function main() {\n const stream = await openai.beta.threads.runs.create(\n \"thread_abc123\",\n {\n assistant_id: \"asst_abc123\",\n tools: tools,\n stream: true\n }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" + response: "event: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710348075,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: 
{\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710348077,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! 
How can I assist you today?\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710348077,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710348075,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710348077,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" + '/threads/{thread_id}/runs/{run_id}': get: tags: - - Batch - summary: List your organization's batches. - operationId: listBatches + - Assistants + summary: Retrieves a run. + operationId: getRun parameters: - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + - name: thread_id + in: path + description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' + required: true schema: type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + - name: run_id + in: path + description: The ID of the run to retrieve. + required: true schema: - type: integer - default: 20 + type: string responses: '200': - description: Batch listed successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ListBatchesResponse' + $ref: '#/components/schemas/RunObject' x-oaiMeta: - name: List batch - group: batch - returns: 'A list of paginated [Batch](/docs/api-reference/batch/object) objects.' + name: Retrieve run + group: threads + beta: true + returns: 'The [run](/docs/api-reference/runs/object) object matching the specified ID.' 
examples: request: - curl: "curl https://api.openai.com/v1/batches?limit=2 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.list()\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.batches.list();\n\n for await (const batch of list) {\n console.log(batch);\n }\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly job\",\n }\n },\n { ... 
},\n ],\n \"first_id\": \"batch_abc123\",\n \"last_id\": \"batch_abc456\",\n \"has_more\": true\n}\n" - '/batches/{batch_id}': - get: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.retrieve(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.retrieve(\n \"thread_abc123\",\n \"run_abc123\"\n );\n\n console.log(run);\n}\n\nmain();\n" + response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" + post: tags: - - Batch - summary: Retrieves a batch. - operationId: retrieveBatch + - Assistants + summary: Modifies a run. + operationId: modifyRun parameters: - - name: batch_id + - name: thread_id in: path - description: The ID of the batch to retrieve. + description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' 
+ required: true + schema: + type: string + - name: run_id + in: path + description: The ID of the run to modify. required: true schema: type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ModifyRunRequest' + required: true responses: '200': - description: Batch retrieved successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/Batch' + $ref: '#/components/schemas/RunObject' x-oaiMeta: - name: Retrieve batch - group: batch - returns: 'The [Batch](/docs/api-reference/batch/object) object matching the specified ID.' + name: Modify run + group: threads + beta: true + returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' examples: request: - curl: "curl https://api.openai.com/v1/batches/batch_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.retrieve(\"batch_abc123\")\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.retrieve(\"batch_abc123\");\n\n console.log(batch);\n}\n\nmain();\n" - response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n 
}\n}\n" - '/batches/{batch_id}/cancel': + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"user_id\": \"user_abc123\"\n }\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.update(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\",\n metadata={\"user_id\": \"user_abc123\"},\n)\n\nprint(run)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.update(\n \"thread_abc123\",\n \"run_abc123\",\n {\n metadata: {\n user_id: \"user_abc123\",\n },\n }\n );\n\n console.log(run);\n}\n\nmain();\n" + response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {\n \"user_id\": \"user_abc123\"\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" + '/threads/{thread_id}/runs/{run_id}/cancel': post: tags: - - Batch - summary: 'Cancels an 
in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file.' - operationId: cancelBatch + - Assistants + summary: Cancels a run that is `in_progress`. + operationId: cancelRun parameters: - - name: batch_id + - name: thread_id in: path - description: The ID of the batch to cancel. + description: The ID of the thread to which this run belongs. + required: true + schema: + type: string + - name: run_id + in: path + description: The ID of the run to cancel. required: true schema: type: string responses: '200': - description: Batch is cancelling. Returns the cancelling batch's details. + description: OK content: application/json: schema: - $ref: '#/components/schemas/Batch' + $ref: '#/components/schemas/RunObject' x-oaiMeta: - name: Cancel batch - group: batch - returns: 'The [Batch](/docs/api-reference/batch/object) object matching the specified ID.' + name: Cancel a run + group: threads + beta: true + returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' 
examples: request: - curl: "curl https://api.openai.com/v1/batches/batch_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -X POST\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.cancel(\"batch_abc123\")\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.cancel(\"batch_abc123\");\n\n console.log(batch);\n}\n\nmain();\n" - response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"cancelling\",\n \"output_file_id\": null,\n \"error_file_id\": null,\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": null,\n \"completed_at\": null,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": 1711475133,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 23,\n \"failed\": 1\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" - /organization/audit_logs: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X POST\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.cancel(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.cancel(\n \"thread_abc123\",\n \"run_abc123\"\n );\n\n console.log(run);\n}\n\nmain();\n" + response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699076126,\n \"assistant_id\": 
\"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"cancelling\",\n \"started_at\": 1699076126,\n \"expires_at\": 1699076726,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You summarize books.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": [\"vs_123\"]\n }\n },\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" + '/threads/{thread_id}/runs/{run_id}/steps': get: tags: - - Audit Logs - summary: List user actions and configuration changes within this organization. - operationId: list-audit-logs + - Assistants + summary: Returns a list of run steps belonging to a run. + operationId: listRunSteps parameters: - - name: effective_at + - name: thread_id + in: path + description: The ID of the thread the run and run steps belong to. + required: true + schema: + type: string + - name: run_id + in: path + description: The ID of the run the run steps belong to. + required: true + schema: + type: string + - name: limit in: query - description: Return only events whose `effective_at` (Unix seconds) is in this range. + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" schema: - type: object - properties: - gt: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is greater than this value. - gte: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is greater than or equal to this value. - lt: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is less than this value. 
- lte: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is less than or equal to this value. - - name: 'project_ids[]' + type: integer + default: 20 + - name: order in: query - description: Return only events for these projects. + description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" schema: - type: array - items: - type: string - - name: 'event_types[]' + enum: + - asc + - desc + type: string + default: desc + - name: after in: query - description: 'Return only events with a `type` in one of these values. For example, `project.created`. For all options, see the documentation for the [audit log object](/docs/api-reference/audit-logs/object).' + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: - type: array - items: - $ref: '#/components/schemas/AuditLogEventType' - - name: 'actor_ids[]' + type: string + - name: before in: query - description: 'Return only events performed by these actors. Can be a user ID, a service account ID, or an api key tracking ID.' + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: - type: array - items: - type: string - - name: 'actor_emails[]' + type: string + - name: 'include[]' in: query - description: Return only events performed by users with these emails. + description: "A list of additional fields to include in the response. 
Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" schema: type: array items: + enum: + - 'step_details.tool_calls[*].file_search.results[*].content' type: string - - name: 'resource_ids[]' - in: query - description: 'Return only events performed on these targets. For example, a project ID updated.' + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ListRunStepsResponse' + x-oaiMeta: + name: List run steps + group: threads + beta: true + returns: 'A list of [run step](/docs/api-reference/run-steps/step-object) objects.' + examples: + request: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun_steps = client.beta.threads.runs.steps.list(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run_steps)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const runStep = await openai.beta.threads.runs.steps.list(\n \"thread_abc123\",\n \"run_abc123\"\n );\n console.log(runStep);\n}\n\nmain();\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n 
\"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n }\n ],\n \"first_id\": \"step_abc123\",\n \"last_id\": \"step_abc456\",\n \"has_more\": false\n}\n" + '/threads/{thread_id}/runs/{run_id}/steps/{step_id}': + get: + tags: + - Assistants + summary: Retrieves a run step. + operationId: getRunStep + parameters: + - name: thread_id + in: path + description: The ID of the thread to which the run and run step belong. + required: true schema: - type: array - items: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + type: string + - name: run_id + in: path + description: The ID of the run to which the run step belongs. + required: true schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + type: string + - name: step_id + in: path + description: The ID of the run step to retrieve. + required: true schema: type: string - - name: before + - name: 'include[]' in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + description: "A list of additional fields to include in the response. 
Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" schema: - type: string + type: array + items: + enum: + - 'step_details.tool_calls[*].file_search.results[*].content' + type: string responses: '200': - description: Audit logs listed successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ListAuditLogsResponse' + $ref: '#/components/schemas/RunStepObject' x-oaiMeta: - name: List audit logs - group: audit-logs - returns: 'A list of paginated [Audit Log](/docs/api-reference/audit-logs/object) objects.' + name: Retrieve run step + group: threads + beta: true + returns: 'The [run step](/docs/api-reference/run-steps/step-object) object matching the specified ID.' examples: request: - curl: "curl https://api.openai.com/v1/organization/audit_logs \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\" \\\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"audit_log-xxx_yyyymmdd\",\n \"type\": \"project.archived\",\n \"effective_at\": 1722461446,\n \"actor\": {\n \"type\": \"api_key\",\n \"api_key\": {\n \"type\": \"user\",\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n }\n }\n },\n \"project.archived\": {\n \"id\": \"proj_abc\"\n },\n },\n {\n \"id\": \"audit_log-yyy__20240101\",\n \"type\": \"api_key.updated\",\n \"effective_at\": 1720804190,\n \"actor\": {\n \"type\": \"session\",\n \"session\": {\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n },\n \"ip_address\": \"127.0.0.1\",\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n },\n \"api_key.updated\": {\n \"id\": \"key_xxxx\",\n \"data\": {\n \"scopes\": 
[\"resource_2.operation_2\"]\n }\n },\n }\n ],\n \"first_id\": \"audit_log-xxx__20240101\",\n \"last_id\": \"audit_log_yyy__20240101\",\n \"has_more\": true\n}\n" - /organization/invites: - get: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps/step_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun_step = client.beta.threads.runs.steps.retrieve(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\",\n step_id=\"step_abc123\"\n)\n\nprint(run_step)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const runStep = await openai.beta.threads.runs.steps.retrieve(\n \"thread_abc123\",\n \"run_abc123\",\n \"step_abc123\"\n );\n console.log(runStep);\n}\n\nmain();\n" + response: "{\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n}\n" + '/threads/{thread_id}/runs/{run_id}/submit_tool_outputs': + post: tags: - - Invites - summary: Returns a list of invites in the organization. - operationId: list-invites + - Assistants + summary: "When a run has the `status: \"requires_action\"` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once they're all completed. 
All outputs must be submitted in a single request.\n" + operationId: submitToolOuputsToRun parameters: - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + - name: thread_id + in: path + description: 'The ID of the [thread](/docs/api-reference/threads) to which this run belongs.' + required: true schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + type: string + - name: run_id + in: path + description: The ID of the run that requires the tool output submission. + required: true schema: type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/SubmitToolOutputsRunRequest' + required: true responses: '200': - description: Invites listed successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/InviteListResponse' + $ref: '#/components/schemas/RunObject' x-oaiMeta: - name: List invites - group: administration - returns: 'A list of [Invite](/docs/api-reference/invite/object) objects.' + name: Submit tool outputs to run + group: threads + beta: true + returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' 
examples: - request: - curl: "curl https://api.openai.com/v1/organization/invites?after=invite-abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n }\n ],\n \"first_id\": \"invite-abc\",\n \"last_id\": \"invite-abc\",\n \"has_more\": false\n}\n" + - title: Default + request: + curl: "curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"tool_outputs\": [\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ]\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.submit_tool_outputs(\n thread_id=\"thread_123\",\n run_id=\"run_123\",\n tool_outputs=[\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ]\n)\n\nprint(run)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.submitToolOutputs(\n \"thread_123\",\n \"run_123\",\n {\n tool_outputs: [\n {\n tool_call_id: \"call_001\",\n output: \"70 degrees and sunny.\",\n },\n ],\n }\n );\n\n console.log(run);\n}\n\nmain();\n" + response: "{\n \"id\": \"run_123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075592,\n \"assistant_id\": \"asst_123\",\n \"thread_id\": \"thread_123\",\n \"status\": \"queued\",\n \"started_at\": 1699075592,\n \"expires_at\": 1699076192,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"last_error\": null,\n \"model\": 
\"gpt-4o\",\n \"instructions\": null,\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" + - title: Streaming + request: + curl: "curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"tool_outputs\": [\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ],\n \"stream\": true\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.runs.submit_tool_outputs(\n thread_id=\"thread_123\",\n run_id=\"run_123\",\n tool_outputs=[\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ],\n stream=True\n)\n\nfor event in stream:\n print(event)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.runs.submitToolOutputs(\n \"thread_123\",\n \"run_123\",\n {\n tool_outputs: [\n {\n tool_call_id: \"call_001\",\n output: \"70 degrees and sunny.\",\n },\n ],\n }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" + response: "event: 
thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710352449,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710352475,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"id\":\"call_iWr0kQ2EaYMaxNdl0v3KYkx7\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"{\\\"location\\\":\\\"San Francisco, CA\\\",\\\"unit\\\":\\\"fahrenheit\\\"}\",\"output\":\"70 degrees and sunny.\"}}]},\"usage\":{\"prompt_tokens\":291,\"completion_tokens\":24,\"total_tokens\":315}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":1710352448,\"expires_at\":1710353047,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710352475,\"expires_at\":1710353047,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: 
{\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"The\",\"annotations\":[]}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" current\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" weather\"}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" sunny\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\".\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710352477,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"The current weather in San Francisco, CA is 70 degrees Fahrenheit and sunny.\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: 
{\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710352477,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":{\"prompt_tokens\":329,\"completion_tokens\":18,\"total_tokens\":347}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710352475,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710352477,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" + /uploads: post: tags: - - Invites - summary: Create an invite for a user to the organization. The invite must be accepted by the user before they have access to the organization. 
- operationId: inviteUser + - Uploads + summary: "Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires an hour after you create it.\n\nOnce you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.\n\nFor certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:\n- [Assistants](/docs/assistants/tools/file-search/supported-files)\n\nFor guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create).\n" + operationId: createUpload requestBody: - description: The invite request payload. content: application/json: schema: - $ref: '#/components/schemas/InviteRequest' + $ref: '#/components/schemas/CreateUploadRequest' required: true responses: '200': - description: User invited successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/Invite' + $ref: '#/components/schemas/Upload' x-oaiMeta: - name: Create invite - group: administration - returns: 'The created [Invite](/docs/api-reference/invite/object) object.' + name: Create upload + group: uploads + returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `pending`.'
examples: request: - curl: "curl -X POST https://api.openai.com/v1/organization/invites \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"email\": \"user@example.com\",\n \"role\": \"owner\"\n }'\n" - response: - content: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": null\n}\n" - '/organization/invites/{invite_id}': - get: + curl: "curl https://api.openai.com/v1/uploads \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"purpose\": \"fine-tune\",\n \"filename\": \"training_examples.jsonl\",\n \"bytes\": 2147483648,\n \"mime_type\": \"text/jsonl\"\n }'\n" + response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"pending\",\n \"expires_at\": 1719127296\n}\n" + '/uploads/{upload_id}/cancel': + post: tags: - - Invites - summary: Retrieves an invite. - operationId: retrieve-invite + - Uploads + summary: "Cancels the Upload. No Parts may be added after an Upload is cancelled.\n" + operationId: cancelUpload parameters: - - name: invite_id + - name: upload_id in: path - description: The ID of the invite to retrieve. + description: "The ID of the Upload.\n" required: true schema: type: string + example: upload_abc123 responses: '200': - description: Invite retrieved successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/Invite' + $ref: '#/components/schemas/Upload' x-oaiMeta: - name: Retrieve invite - group: administration - returns: 'The [Invite](/docs/api-reference/invite/object) object matching the specified ID.' + name: Cancel upload + group: uploads + returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `cancelled`.' 
examples: request: - curl: "curl https://api.openai.com/v1/organization/invites/invite-abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n}\n" - delete: + curl: "curl https://api.openai.com/v1/uploads/upload_abc123/cancel\n" + response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"cancelled\",\n \"expires_at\": 1719127296\n}\n" + '/uploads/{upload_id}/complete': + post: tags: - - Invites - summary: 'Delete an invite. If the invite has already been accepted, it cannot be deleted.' - operationId: delete-invite + - Uploads + summary: "Completes the [Upload](/docs/api-reference/uploads/object). \n\nWithin the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform.\n\nYou can specify the order of the Parts by passing in an ordered list of the Part IDs.\n\nThe number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed.\n" + operationId: completeUpload parameters: - - name: invite_id + - name: upload_id in: path - description: The ID of the invite to delete. + description: "The ID of the Upload.\n" required: true schema: type: string + example: upload_abc123 + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CompleteUploadRequest' + required: true responses: '200': - description: Invite deleted successfully. 
- content: - application/json: - schema: - $ref: '#/components/schemas/InviteDeleteResponse' - x-oaiMeta: - name: Delete invite - group: administration - returns: Confirmation that the invite has been deleted - examples: - request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/invites/invite-abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.invite.deleted\",\n \"id\": \"invite-abc\",\n \"deleted\": true\n} \n" - /organization/users: - get: - tags: - - Users - summary: Lists all of the users in the organization. - operationId: list-users - parameters: - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - responses: - '200': - description: Users listed successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/UserListResponse' + $ref: '#/components/schemas/Upload' x-oaiMeta: - name: List users - group: administration - returns: 'A list of [User](/docs/api-reference/users/object) objects.' + name: Complete upload + group: uploads + returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `completed` with an additional `file` property containing the created usable File object.' 
examples: request: - curl: "curl https://api.openai.com/v1/organization/users?after=user_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n ],\n \"first_id\": \"user-abc\",\n \"last_id\": \"user-xyz\",\n \"has_more\": false\n}\n" - '/organization/users/{user_id}': - get: + curl: "curl https://api.openai.com/v1/uploads/upload_abc123/complete\n -d '{\n \"part_ids\": [\"part_def456\", \"part_ghi789\"]\n }'\n" + response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"completed\",\n \"expires_at\": 1719127296,\n \"file\": {\n \"id\": \"file-xyz321\",\n \"object\": \"file\",\n \"bytes\": 2147483648,\n \"created_at\": 1719186911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n}\n" + '/uploads/{upload_id}/parts': + post: tags: - - Users - summary: Retrieves a user by their identifier. - operationId: retrieve-user + - Uploads + summary: "Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. \n\nEach Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB.\n\nIt is possible to add multiple Parts in parallel. You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete).\n" + operationId: addUploadPart parameters: - - name: user_id + - name: upload_id in: path - description: The ID of the user. 
+ description: "The ID of the Upload.\n" required: true schema: type: string - responses: - '200': - description: User retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/User' - x-oaiMeta: - name: Retrieve user - group: administration - returns: 'The [User](/docs/api-reference/users/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - post: - tags: - - Users - summary: Modifies a user's role in the organization. - operationId: modify-user + example: upload_abc123 requestBody: - description: The new user role to modify. This must be one of `owner` or `member`. content: - application/json: + multipart/form-data: schema: - $ref: '#/components/schemas/UserRoleUpdateRequest' + $ref: '#/components/schemas/AddUploadPartRequest' required: true responses: '200': - description: User role updated successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/User' - x-oaiMeta: - name: Modify user - group: administration - returns: 'The updated [User](/docs/api-reference/users/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"role\": \"owner\"\n }'\n" - response: - content: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - delete: - tags: - - Users - summary: Deletes a user from the organization. 
- operationId: delete-user - parameters: - - name: user_id - in: path - description: The ID of the user. - required: true - schema: - type: string - responses: - '200': - description: User deleted successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/UserDeleteResponse' + $ref: '#/components/schemas/UploadPart' x-oaiMeta: - name: Delete user - group: administration - returns: Confirmation of the deleted user + name: Add upload part + group: uploads + returns: 'The upload [Part](/docs/api-reference/uploads/part-object) object.' examples: request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.user.deleted\",\n \"id\": \"user_abc\",\n \"deleted\": true\n} \n" - /organization/projects: + curl: "curl https://api.openai.com/v1/uploads/upload_abc123/parts\n -F data=\"aHR0cHM6Ly9hcGkub3BlbmFpLmNvbS92MS91cGxvYWRz...\"\n" + response: "{\n \"id\": \"part_def456\",\n \"object\": \"upload.part\",\n \"created_at\": 1719185911,\n \"upload_id\": \"upload_abc123\"\n}\n" + /vector_stores: get: tags: - - Projects - summary: Returns a list of projects. - operationId: list-projects + - Vector stores + summary: Returns a list of vector stores. + operationId: listVectorStores parameters: - name: limit in: query @@ -2951,362 +3072,299 @@ paths: schema: type: integer default: 20 + - name: order + in: query + description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" + schema: + enum: + - asc + - desc + type: string + default: desc - name: after in: query description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string - - name: include_archived + - name: before in: query - description: If `true` returns all projects including those that have been `archived`. Archived projects are not included by default. + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: - type: boolean - default: false + type: string responses: '200': - description: Projects listed successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ProjectListResponse' + $ref: '#/components/schemas/ListVectorStoresResponse' x-oaiMeta: - name: List projects - group: administration - returns: 'A list of [Project](/docs/api-reference/projects/object) objects.' + name: List vector stores + group: vector_stores + beta: true + returns: 'A list of [vector store](/docs/api-reference/vector-stores/object) objects.' 
examples: request: - curl: "curl https://api.openai.com/v1/organization/projects?after=proj_abc&limit=20&include_archived=false \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n }\n ],\n \"first_id\": \"proj-abc\",\n \"last_id\": \"proj-xyz\",\n \"has_more\": false\n}\n" + curl: "curl https://api.openai.com/v1/vector_stores \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_stores = client.beta.vector_stores.list()\nprint(vector_stores)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStores = await openai.beta.vectorStores.list();\n console.log(vectorStores);\n}\n\nmain();\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n },\n {\n \"id\": \"vs_abc456\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ v2\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n }\n ],\n \"first_id\": \"vs_abc123\",\n \"last_id\": \"vs_abc456\",\n \"has_more\": false\n}\n" post: tags: - - Projects - summary: 'Create a new project in the organization. Projects can be created and archived, but cannot be deleted.' - operationId: create-project + - Vector stores + summary: Create a vector store. 
+ operationId: createVectorStore requestBody: - description: The project create request payload. content: application/json: schema: - $ref: '#/components/schemas/ProjectCreateRequest' + $ref: '#/components/schemas/CreateVectorStoreRequest' required: true responses: '200': - description: Project created successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/Project' + $ref: '#/components/schemas/VectorStoreObject' x-oaiMeta: - name: Create project - group: administration - returns: 'The created [Project](/docs/api-reference/projects/object) object.' + name: Create vector store + group: vector_stores + beta: true + returns: 'A [vector store](/docs/api-reference/vector-stores/object) object.' examples: request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Project ABC\"\n }'\n" - response: - content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project ABC\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" - '/organization/projects/{project_id}': + curl: "curl https://api.openai.com/v1/vector_stores \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n -d '{\n \"name\": \"Support FAQ\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.create(\n name=\"Support FAQ\"\n)\nprint(vector_store)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.create({\n name: \"Support FAQ\"\n });\n console.log(vectorStore);\n}\n\nmain();\n" + response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n 
\"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n}\n" + '/vector_stores/{vector_store_id}': get: tags: - - Projects - summary: Retrieves a project. - operationId: retrieve-project + - Vector stores + summary: Retrieves a vector store. + operationId: getVectorStore parameters: - - name: project_id + - name: vector_store_id in: path - description: The ID of the project. + description: The ID of the vector store to retrieve. required: true schema: type: string responses: '200': - description: Project retrieved successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/Project' + $ref: '#/components/schemas/VectorStoreObject' x-oaiMeta: - name: Retrieve project - group: administration - description: Retrieve a project. - returns: 'The [Project](/docs/api-reference/projects/object) object matching the specified ID.' + name: Retrieve vector store + group: vector_stores + beta: true + returns: 'The [vector store](/docs/api-reference/vector-stores/object) object matching the specified ID.' 
examples: request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.retrieve(\n vector_store_id=\"vs_abc123\"\n)\nprint(vector_store)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.retrieve(\n \"vs_abc123\"\n );\n console.log(vectorStore);\n}\n\nmain();\n" + response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776\n}\n" post: tags: - - Projects - summary: Modifies a project in the organization. - operationId: modify-project + - Vector stores + summary: Modifies a vector store. + operationId: modifyVectorStore + parameters: + - name: vector_store_id + in: path + description: The ID of the vector store to modify. + required: true + schema: + type: string requestBody: - description: The project update request payload. content: application/json: schema: - $ref: '#/components/schemas/ProjectUpdateRequest' + $ref: '#/components/schemas/UpdateVectorStoreRequest' required: true responses: '200': - description: Project updated successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - '400': - description: Error response when updating the default project. 
+ description: OK content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + $ref: '#/components/schemas/VectorStoreObject' x-oaiMeta: - name: Modify project - group: administration - returns: 'The updated [Project](/docs/api-reference/projects/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Project DEF\"\n }'\n" - '/organization/projects/{project_id}/archive': - post: - tags: - - Projects - summary: Archives a project in the organization. Archived projects cannot be used or updated. - operationId: archive-project - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - responses: - '200': - description: Project archived successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - x-oaiMeta: - name: Archive project - group: administration - returns: 'The archived [Project](/docs/api-reference/projects/object) object.' + name: Modify vector store + group: vector_stores + beta: true + returns: 'The modified [vector store](/docs/api-reference/vector-stores/object) object.' 
examples: request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/archive \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project DEF\",\n \"created_at\": 1711471533,\n \"archived_at\": 1711471533,\n \"status\": \"archived\"\n}\n" - '/organization/projects/{project_id}/users': - get: + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n -d '{\n \"name\": \"Support FAQ\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.update(\n vector_store_id=\"vs_abc123\",\n name=\"Support FAQ\"\n)\nprint(vector_store)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.update(\n \"vs_abc123\",\n {\n name: \"Support FAQ\"\n }\n );\n console.log(vectorStore);\n}\n\nmain();\n" + response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n}\n" + delete: tags: - - Projects - summary: Returns a list of users in the project. - operationId: list-project-users + - Vector stores + summary: Delete a vector store. + operationId: deleteVectorStore parameters: - - name: project_id + - name: vector_store_id in: path - description: The ID of the project. + description: The ID of the vector store to delete. required: true schema: type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. 
Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string responses: '200': - description: Project users listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserListResponse' - '400': - description: Error response when project is archived. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + $ref: '#/components/schemas/DeleteVectorStoreResponse' x-oaiMeta: - name: List project users - group: administration - returns: 'A list of [ProjectUser](/docs/api-reference/project-users/object) objects.' + name: Delete vector store + group: vector_stores + beta: true + returns: Deletion status examples: request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/users?after=user_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n ],\n \"first_id\": \"user-abc\",\n \"last_id\": \"user-xyz\",\n \"has_more\": false\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" + python: "from openai import OpenAI\nclient = 
OpenAI()\n\ndeleted_vector_store = client.beta.vector_stores.delete(\n vector_store_id=\"vs_abc123\"\n)\nprint(deleted_vector_store)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStore = await openai.beta.vectorStores.del(\n \"vs_abc123\"\n );\n console.log(deletedVectorStore);\n}\n\nmain();\n" + response: "{\n id: \"vs_abc123\",\n object: \"vector_store.deleted\",\n deleted: true\n}\n" + '/vector_stores/{vector_store_id}/file_batches': post: tags: - - Projects - summary: Adds a user to the project. Users must already be members of the organization to be added to a project. - operationId: create-project-user + - Vector stores + summary: Create a vector store file batch. + operationId: createVectorStoreFileBatch parameters: - - name: project_id + - name: vector_store_id in: path - description: The ID of the project. + description: "The ID of the vector store for which to create a File Batch.\n" required: true schema: type: string + example: vs_abc123 requestBody: - description: The project user create request payload. content: application/json: schema: - $ref: '#/components/schemas/ProjectUserCreateRequest' + $ref: '#/components/schemas/CreateVectorStoreFileBatchRequest' required: true responses: '200': - description: User added to project successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUser' - '400': - description: Error response for various conditions. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + $ref: '#/components/schemas/VectorStoreFileBatchObject' x-oaiMeta: - name: Create project user - group: administration - returns: 'The created [ProjectUser](/docs/api-reference/project-users/object) object.' + name: Create vector store file batch + group: vector_stores + beta: true + returns: 'A [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.' 
examples: request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"user_id\": \"user_abc\",\n \"role\": \"member\"\n }'\n" - response: - content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" - '/organization/projects/{project_id}/users/{user_id}': + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"file_ids\": [\"file-abc123\", \"file-abc456\"]\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file_batch = client.beta.vector_stores.file_batches.create(\n vector_store_id=\"vs_abc123\",\n file_ids=[\"file-abc123\", \"file-abc456\"]\n)\nprint(vector_store_file_batch)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const myVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.create(\n \"vs_abc123\",\n {\n file_ids: [\"file-abc123\", \"file-abc456\"]\n }\n );\n console.log(myVectorStoreFileBatch);\n}\n\nmain();\n" + response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"in_progress\",\n \"file_counts\": {\n \"in_progress\": 1,\n \"completed\": 1,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 0,\n }\n}\n" + '/vector_stores/{vector_store_id}/file_batches/{batch_id}': get: tags: - - Projects - summary: Retrieves a user in the project. - operationId: retrieve-project-user + - Vector stores + summary: Retrieves a vector store file batch. 
+ operationId: getVectorStoreFileBatch parameters: - - name: project_id + - name: vector_store_id in: path - description: The ID of the project. + description: The ID of the vector store that the file batch belongs to. required: true schema: type: string - - name: user_id + example: vs_abc123 + - name: batch_id in: path - description: The ID of the user. + description: The ID of the file batch being retrieved. required: true schema: type: string + example: vsfb_abc123 responses: '200': - description: Project user retrieved successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ProjectUser' + $ref: '#/components/schemas/VectorStoreFileBatchObject' x-oaiMeta: - name: Retrieve project user - group: administration - returns: 'The [ProjectUser](/docs/api-reference/project-users/object) object matching the specified ID.' + name: Retrieve vector store file batch + group: vector_stores + beta: true + returns: 'The [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.' 
examples: request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files_batches/vsfb_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file_batch = client.beta.vector_stores.file_batches.retrieve(\n vector_store_id=\"vs_abc123\",\n batch_id=\"vsfb_abc123\"\n)\nprint(vector_store_file_batch)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.retrieve(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(vectorStoreFileBatch);\n}\n\nmain();\n" + response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"in_progress\",\n \"file_counts\": {\n \"in_progress\": 1,\n \"completed\": 1,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 0,\n }\n}\n" + '/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel': post: tags: - - Projects - summary: Modifies a user's role in the project. - operationId: modify-project-user - requestBody: - description: The project user update request payload. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserUpdateRequest' - required: true - responses: - '200': - description: Project user's role updated successfully. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUser' - '400': - description: Error response for various conditions. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Modify project user - group: administration - returns: 'The updated [ProjectUser](/docs/api-reference/project-users/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"role\": \"owner\"\n }'\n" - response: - content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - delete: - tags: - - Projects - summary: Deletes a user from the project. - operationId: delete-project-user + - Vector stores + summary: Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible. + operationId: cancelVectorStoreFileBatch parameters: - - name: project_id + - name: vector_store_id in: path - description: The ID of the project. + description: The ID of the vector store that the file batch belongs to. required: true schema: type: string - - name: user_id + - name: batch_id in: path - description: The ID of the user. + description: The ID of the file batch to cancel. required: true schema: type: string responses: '200': - description: Project user deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserDeleteResponse' - '400': - description: Error response for various conditions. 
+ description: OK content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + $ref: '#/components/schemas/VectorStoreFileBatchObject' x-oaiMeta: - name: Delete project user - group: administration - returns: 'Confirmation that project has been deleted or an error in case of an archived project, which has no users' + name: Cancel vector store file batch + group: vector_stores + beta: true + returns: The modified vector store file batch object. examples: request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.user.deleted\",\n \"id\": \"user_abc\",\n \"deleted\": true\n}\n" - '/organization/projects/{project_id}/service_accounts': + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files_batches/vsfb_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X POST\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store_file_batch = client.beta.vector_stores.file_batches.cancel(\n vector_store_id=\"vs_abc123\",\n file_batch_id=\"vsfb_abc123\"\n)\nprint(deleted_vector_store_file_batch)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStoreFileBatch = await openai.vector_stores.fileBatches.cancel(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(deletedVectorStoreFileBatch);\n}\n\nmain();\n" + response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"cancelling\",\n \"file_counts\": {\n \"in_progress\": 12,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 15,\n }\n}\n" + 
'/vector_stores/{vector_store_id}/file_batches/{batch_id}/files': get: tags: - - Projects - summary: Returns a list of service accounts in the project. - operationId: list-project-service-accounts + - Vector stores + summary: Returns a list of vector store files in a batch. + operationId: listFilesInVectorStoreBatch parameters: - - name: project_id + - name: vector_store_id in: path - description: The ID of the project. + description: The ID of the vector store that the files belong to. + required: true + schema: + type: string + - name: batch_id + in: path + description: The ID of the file batch that the files belong to. required: true schema: type: string @@ -3316,1135 +3374,2339 @@ paths: schema: type: integer default: 20 - - name: after + - name: order in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" schema: - type: string - responses: - '200': - description: Project service accounts listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountListResponse' - '400': - description: Error response when project is archived. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: List project service accounts - group: administration - returns: 'A list of [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts?after=custom_id&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n }\n ],\n \"first_id\": \"svc_acct_abc\",\n \"last_id\": \"svc_acct_xyz\",\n \"has_more\": false\n}\n" - post: - tags: - - Projects - summary: Creates a new service account in the project. This also returns an unredacted API key for the service account. - operationId: create-project-service-account - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true + enum: + - asc + - desc + type: string + default: desc + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string - requestBody: - description: The project service account create request payload. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountCreateRequest' - required: true + - name: before + in: query + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + schema: + type: string + - name: filter + in: query + description: 'Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.' 
+ schema: + enum: + - in_progress + - completed + - failed + - cancelled + type: string responses: '200': - description: Project service account created successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountCreateResponse' - '400': - description: Error response when project is archived. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + $ref: '#/components/schemas/ListVectorStoreFilesResponse' x-oaiMeta: - name: Create project service account - group: administration - returns: 'The created [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object.' + name: List vector store files in a batch + group: vector_stores + beta: true + returns: 'A list of [vector store file](/docs/api-reference/vector-stores-files/file-object) objects.' examples: request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/service_accounts \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Production App\"\n }'\n" - response: - content: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Production App\",\n \"role\": \"member\",\n \"created_at\": 1711471533,\n \"api_key\": {\n \"object\": \"organization.project.service_account.api_key\",\n \"value\": \"sk-abcdefghijklmnop123\",\n \"name\": \"Secret Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\"\n }\n}\n" - '/organization/projects/{project_id}/service_accounts/{service_account_id}': + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches/vsfb_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_files = client.beta.vector_stores.file_batches.list_files(\n 
vector_store_id=\"vs_abc123\",\n batch_id=\"vsfb_abc123\"\n)\nprint(vector_store_files)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFiles = await openai.beta.vectorStores.fileBatches.listFiles(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(vectorStoreFiles);\n}\n\nmain();\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n },\n {\n \"id\": \"file-abc456\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n }\n ],\n \"first_id\": \"file-abc123\",\n \"last_id\": \"file-abc456\",\n \"has_more\": false\n}\n" + '/vector_stores/{vector_store_id}/files': get: tags: - - Projects - summary: Retrieves a service account in the project. - operationId: retrieve-project-service-account + - Vector stores + summary: Returns a list of vector store files. + operationId: listVectorStoreFiles parameters: - - name: project_id + - name: vector_store_id in: path - description: The ID of the project. + description: The ID of the vector store that the files belong to. required: true schema: type: string - - name: service_account_id - in: path - description: The ID of the service account. - required: true + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + schema: + type: integer + default: 20 + - name: order + in: query + description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" + schema: + enum: + - asc + - desc + type: string + default: desc + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + schema: + type: string + - name: before + in: query + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + schema: + type: string + - name: filter + in: query + description: 'Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.' schema: + enum: + - in_progress + - completed + - failed + - cancelled type: string responses: '200': - description: Project service account retrieved successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ProjectServiceAccount' + $ref: '#/components/schemas/ListVectorStoreFilesResponse' x-oaiMeta: - name: Retrieve project service account - group: administration - returns: 'The [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object matching the specified ID.' + name: List vector store files + group: vector_stores + beta: true + returns: 'A list of [vector store file](/docs/api-reference/vector-stores-files/file-object) objects.' 
examples: request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n}\n" - delete: + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_files = client.beta.vector_stores.files.list(\n vector_store_id=\"vs_abc123\"\n)\nprint(vector_store_files)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFiles = await openai.beta.vectorStores.files.list(\n \"vs_abc123\"\n );\n console.log(vectorStoreFiles);\n}\n\nmain();\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n },\n {\n \"id\": \"file-abc456\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n }\n ],\n \"first_id\": \"file-abc123\",\n \"last_id\": \"file-abc456\",\n \"has_more\": false\n}\n" + post: tags: - - Projects - summary: Deletes a service account from the project. - operationId: delete-project-service-account + - Vector stores + summary: 'Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).' + operationId: createVectorStoreFile parameters: - - name: project_id - in: path - description: The ID of the project. 
- required: true - schema: - type: string - - name: service_account_id + - name: vector_store_id in: path - description: The ID of the service account. + description: "The ID of the vector store for which to create a File.\n" required: true schema: type: string + example: vs_abc123 + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateVectorStoreFileRequest' + required: true responses: '200': - description: Project service account deleted successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ProjectServiceAccountDeleteResponse' + $ref: '#/components/schemas/VectorStoreFileObject' x-oaiMeta: - name: Delete project service account - group: administration - returns: 'Confirmation of service account being deleted, or an error in case of an archived project, which has no service accounts' + name: Create vector store file + group: vector_stores + beta: true + returns: 'A [vector store file](/docs/api-reference/vector-stores-files/file-object) object.' 
examples: request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.service_account.deleted\",\n \"id\": \"svc_acct_abc\",\n \"deleted\": true\n}\n" - '/organization/projects/{project_id}/api_keys': + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"file_id\": \"file-abc123\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file = client.beta.vector_stores.files.create(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(vector_store_file)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const myVectorStoreFile = await openai.beta.vectorStores.files.create(\n \"vs_abc123\",\n {\n file_id: \"file-abc123\"\n }\n );\n console.log(myVectorStoreFile);\n}\n\nmain();\n" + response: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"usage_bytes\": 1234,\n \"vector_store_id\": \"vs_abcd\",\n \"status\": \"completed\",\n \"last_error\": null\n}\n" + '/vector_stores/{vector_store_id}/files/{file_id}': get: tags: - - Projects - summary: Returns a list of API keys in the project. - operationId: list-project-api-keys + - Vector stores + summary: Retrieves a vector store file. + operationId: getVectorStoreFile parameters: - - name: project_id + - name: vector_store_id in: path - description: The ID of the project. + description: The ID of the vector store that the file belongs to. required: true schema: type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. 
Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + example: vs_abc123 + - name: file_id + in: path + description: The ID of the file being retrieved. + required: true schema: type: string + example: file-abc123 responses: '200': - description: Project API keys listed successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ProjectApiKeyListResponse' + $ref: '#/components/schemas/VectorStoreFileObject' x-oaiMeta: - name: List project API keys - group: administration - returns: 'A list of [ProjectApiKey](/docs/api-reference/project-api-keys/object) objects.' + name: Retrieve vector store file + group: vector_stores + beta: true + returns: 'The [vector store file](/docs/api-reference/vector-stores-files/file-object) object.' 
examples: request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys?after=key_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n }\n ],\n \"first_id\": \"key_abc\",\n \"last_id\": \"key_xyz\",\n \"has_more\": false\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" - '/organization/projects/{project_id}/api_keys/{key_id}': - get: + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file = client.beta.vector_stores.files.retrieve(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(vector_store_file)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFile = await openai.beta.vectorStores.files.retrieve(\n \"vs_abc123\",\n \"file-abc123\"\n );\n console.log(vectorStoreFile);\n}\n\nmain();\n" + response: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abcd\",\n \"status\": \"completed\",\n \"last_error\": null\n}\n" + delete: tags: - - Projects - summary: Retrieves an API key in the project. 
- operationId: retrieve-project-api-key + - Vector stores + summary: 'Delete a vector store file. This will remove the file from the vector store but the file itself will not be deleted. To delete the file, use the [delete file](/docs/api-reference/files/delete) endpoint.' + operationId: deleteVectorStoreFile parameters: - - name: project_id + - name: vector_store_id in: path - description: The ID of the project. + description: The ID of the vector store that the file belongs to. required: true schema: type: string - - name: key_id + - name: file_id in: path - description: The ID of the API key. + description: The ID of the file to delete. required: true schema: type: string responses: '200': - description: Project API key retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectApiKey' - x-oaiMeta: - name: Retrieve project API key - group: administration - returns: 'The [ProjectApiKey](/docs/api-reference/project-api-keys/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n}\n" - delete: - tags: - - Projects - summary: Deletes an API key from the project. - operationId: delete-project-api-key - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: key_id - in: path - description: The ID of the API key. 
- required: true - schema: - type: string - responses: - '200': - description: Project API key deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectApiKeyDeleteResponse' - '400': - description: Error response for various conditions. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + $ref: '#/components/schemas/DeleteVectorStoreFileResponse' x-oaiMeta: - name: Delete project API key - group: administration - returns: Confirmation of the key's deletion or an error if the key belonged to a service account + name: Delete vector store file + group: vector_stores + beta: true + returns: Deletion status examples: request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.api_key.deleted\",\n \"id\": \"key_abc\",\n \"deleted\": true\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"API keys cannot be deleted for service accounts, please delete the service account\"\n} \n" + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store_file = client.beta.vector_stores.files.delete(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(deleted_vector_store_file)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStoreFile = await openai.beta.vectorStores.files.del(\n \"vs_abc123\",\n \"file-abc123\"\n );\n console.log(deletedVectorStoreFile);\n}\n\nmain();\n" + response: "{\n id: \"file-abc123\",\n object: 
\"vector_store.file.deleted\",\n deleted: true\n}\n" components: schemas: - Error: - required: - - type - - message - - param - - code - type: object - properties: - code: - type: string - nullable: true - message: - type: string - param: - type: string - nullable: true - type: - type: string - ErrorResponse: - required: - - error - type: object - properties: - error: - $ref: '#/components/schemas/Error' - ListModelsResponse: + AddUploadPartRequest: required: - - object - data type: object properties: - object: - enum: - - list - type: string data: - type: array - items: - $ref: '#/components/schemas/Model' - DeleteModelResponse: + type: string + description: "The chunk of bytes for this Part.\n" + format: binary + additionalProperties: false + AssistantObject: + title: Assistant required: - id - object - - deleted + - created_at + - name + - description + - model + - instructions + - tools + - metadata type: object properties: id: type: string - deleted: - type: boolean + description: 'The identifier, which can be referenced in API endpoints.' object: + enum: + - assistant type: string - CreateCompletionRequest: - required: - - model - - prompt - type: object - properties: - model: - anyOf: - - type: string - - enum: - - gpt-3.5-turbo-instruct - - davinci-002 - - babbage-002 - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - x-oaiTypeLabel: string - prompt: - oneOf: - - type: string - default: '' - example: This is a test. - - type: array - items: - type: string - default: '' - example: This is a test. 
- - minItems: 1 - type: array - items: - type: integer - example: '[1212, 318, 257, 1332, 13]' - - minItems: 1 - type: array - items: - minItems: 1 - type: array - items: - type: integer - example: '[[1212, 318, 257, 1332, 13]]' - description: "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\nNote that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.\n" - default: <|endoftext|> - nullable: true - best_of: - maximum: 20 - minimum: 0 + description: 'The object type, which is always `assistant`.' + created_at: type: integer - description: "Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed.\n\nWhen used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n" - default: 1 + description: The Unix timestamp (in seconds) for when the assistant was created. + name: + maxLength: 256 + type: string + description: "The name of the assistant. The maximum length is 256 characters.\n" nullable: true - echo: - type: boolean - description: "Echo back the prompt in addition to the completion\n" - default: false + description: + maxLength: 512 + type: string + description: "The description of the assistant. The maximum length is 512 characters.\n" nullable: true - frequency_penalty: - maximum: 2 - minimum: -2 - type: number - description: "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" - default: 0 + model: + type: string + description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" + instructions: + maxLength: 256000 + type: string + description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" nullable: true - logit_bias: + tools: + maxItems: 128 + type: array + items: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearch' + - $ref: '#/components/schemas/AssistantToolsFunction' + x-oaiExpandable: true + description: "A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" + tool_resources: type: object - additionalProperties: - type: integer - description: "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n\nAs an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.\n" - default: + properties: + code_interpreter: + type: object + properties: + file_ids: + maxItems: 20 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool.\n" + file_search: + type: object + properties: + vector_store_ids: + maxItems: 1 + type: array + items: + type: string + description: "The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" + description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" + nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" nullable: true x-oaiTypeLabel: map - logprobs: - maximum: 5 + temperature: + maximum: 2 minimum: 0 - type: integer - description: "Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. 
The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.\n\nThe maximum value for `logprobs` is 5.\n" - default: + type: number + description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" + default: 1 nullable: true - max_tokens: + example: 1 + top_p: + maximum: 1 minimum: 0 - type: integer - description: "The maximum number of [tokens](/tokenizer) that can be generated in the completion.\n\nThe token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" - default: 16 - nullable: true - example: 16 - n: - maximum: 128 - minimum: 1 - type: integer - description: "How many completions to generate for each prompt.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n" + type: number + description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" default: 1 nullable: true example: 1 - presence_penalty: - maximum: 2 - minimum: -2 - type: number - description: "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" - default: 0 - nullable: true - seed: - maximum: 9223372036854775807 - minimum: -9223372036854775808 - type: integer - description: "If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\n\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n" - nullable: true - stop: - oneOf: - - type: string - default: <|endoftext|> - nullable: true - example: "\n" - - maxItems: 4 - minItems: 1 - type: array - items: - type: string - example: '["\n"]' - description: "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.\n" - default: - nullable: true - stream: - type: boolean - description: "Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n" - default: false - nullable: true - stream_options: - $ref: '#/components/schemas/ChatCompletionStreamOptions' - suffix: - type: string - description: "The suffix that comes after a completion of inserted text.\n\nThis parameter is only supported for `gpt-3.5-turbo-instruct`.\n" - default: - nullable: true - example: test. - temperature: - maximum: 2 - minimum: 0 - type: number - description: "What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n" - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n" - default: 1 - nullable: true - example: 1 - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - CreateCompletionResponse: + response_format: + $ref: '#/components/schemas/AssistantsApiResponseFormatOption' + description: Represents an `assistant` that can call the model and use tools. + x-oaiMeta: + name: The assistant object + beta: true + example: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698984975,\n \"name\": \"Math Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.\",\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" + AssistantStreamEvent: + oneOf: + - $ref: '#/components/schemas/ThreadStreamEvent' + - $ref: '#/components/schemas/RunStreamEvent' + - $ref: '#/components/schemas/RunStepStreamEvent' + - $ref: '#/components/schemas/MessageStreamEvent' + - $ref: '#/components/schemas/ErrorEvent' + - $ref: '#/components/schemas/DoneEvent' + description: "Represents an event emitted when streaming a Run.\n\nEach event in a server-sent events stream has an `event` and `data` property:\n\n```\nevent: thread.created\ndata: {\"id\": \"thread_123\", \"object\": \"thread\", ...}\n```\n\nWe emit events whenever a new object is created, transitions to a new state, or is being\nstreamed in parts (deltas). For example, we emit `thread.run.created` when a new run\nis created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses\nto create a message during a run, we emit a `thread.message.created event`, a\n`thread.message.in_progress` event, many `thread.message.delta` events, and finally a\n`thread.message.completed` event.\n\nWe may add additional events over time, so we recommend handling unknown events gracefully\nin your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to\nintegrate the Assistants API with streaming.\n" + x-oaiMeta: + name: Assistant stream events + beta: true + AssistantToolsCode: + title: Code interpreter tool required: - - id - - object - - created - - model - - choices + - type type: object properties: - id: - type: string - description: A unique identifier for the completion. 
- choices: - type: array - items: - required: - - finish_reason - - index - - logprobs - - text - type: object - properties: - finish_reason: - enum: - - stop - - length - - content_filter - type: string - description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\nor `content_filter` if content was omitted due to a flag from our content filters.\n" - index: - type: integer - logprobs: - type: object - properties: - text_offset: - type: array - items: - type: integer - token_logprobs: - type: array - items: - type: number - tokens: - type: array - items: - type: string - top_logprobs: - type: array - items: - type: object - additionalProperties: - type: number - nullable: true - text: - type: string - description: The list of completion choices the model generated for the input prompt. - created: - type: integer - description: The Unix timestamp (in seconds) of when the completion was created. - model: - type: string - description: The model used for completion. - system_fingerprint: - type: string - description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" - object: + type: enum: - - text_completion + - code_interpreter type: string - description: 'The object type, which is always "text_completion"' - usage: - $ref: '#/components/schemas/CompletionUsage' - description: "Represents a completion response from the API. 
Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint).\n" - x-oaiMeta: - name: The completion object - legacy: true - example: "{\n \"id\": \"cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7\",\n \"object\": \"text_completion\",\n \"created\": 1589478378,\n \"model\": \"gpt-4-turbo\",\n \"choices\": [\n {\n \"text\": \"\\n\\nThis is indeed a test\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": \"length\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 5,\n \"completion_tokens\": 7,\n \"total_tokens\": 12\n }\n}\n" - ChatCompletionRequestMessageContentPartText: - title: Text content part + description: 'The type of tool being defined: `code_interpreter`' + AssistantToolsFileSearch: + title: FileSearch tool required: - type - - text type: object properties: type: enum: - - text - type: string - description: The type of the content part. - text: + - file_search type: string - description: The text content. - ChatCompletionRequestMessageContentPartImage: - title: Image content part + description: 'The type of tool being defined: `file_search`' + file_search: + type: object + properties: + max_num_results: + maximum: 50 + minimum: 1 + type: integer + description: "The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive.\n\nNote that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" + ranking_options: + $ref: '#/components/schemas/FileSearchRankingOptions' + description: Overrides for the file search tool. + AssistantToolsFileSearchTypeOnly: + title: FileSearch tool required: - type - - image_url type: object properties: type: enum: - - image_url + - file_search type: string - description: The type of the content part. 
- image_url: - required: - - url - type: object - properties: - url: - type: string - description: Either a URL of the image or the base64 encoded image data. - format: uri - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).' - default: auto - ChatCompletionRequestMessageContentPartRefusal: - title: Refusal content part + description: 'The type of tool being defined: `file_search`' + AssistantToolsFunction: + title: Function tool required: - type - - refusal + - function type: object properties: type: enum: - - refusal - type: string - description: The type of the content part. - refusal: + - function type: string - description: The refusal message generated by the model. - ChatCompletionRequestMessage: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' - - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' - - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' - - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' - - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' - x-oaiExpandable: true - ChatCompletionRequestSystemMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - x-oaiExpandable: true - ChatCompletionRequestUserMessageContentPart: + description: 'The type of tool being defined: `function`' + function: + $ref: '#/components/schemas/FunctionObject' + AssistantsApiResponseFormatOption: oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartImage' + - enum: + - auto + type: string + description: "`auto` is the default value\n" + - $ref: '#/components/schemas/ResponseFormatText' + - $ref: '#/components/schemas/ResponseFormatJsonObject' + - $ref: 
'#/components/schemas/ResponseFormatJsonSchema' + description: "Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" x-oaiExpandable: true - ChatCompletionRequestAssistantMessageContentPart: + AssistantsApiToolChoiceOption: oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartRefusal' + - enum: + - none + - auto + - required + type: string + description: "`none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`required` means the model must call one or more tools before responding to the user.\n" + - $ref: '#/components/schemas/AssistantsNamedToolChoice' + description: "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tools and instead generates a message.\n`auto` is the default value and means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools before responding to the user.\nSpecifying a particular tool like `{\"type\": \"file_search\"}` or `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n" x-oaiExpandable: true - ChatCompletionRequestToolMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - x-oaiExpandable: true - ChatCompletionRequestSystemMessage: - title: System message - required: - - content - - role - type: object - properties: - content: - oneOf: - - title: Text content - type: string - description: The contents of the system message. - - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestSystemMessageContentPart' - description: 'An array of content parts with a defined type. For system messages, only type `text` is supported.' - description: The contents of the system message. - role: - enum: - - system - type: string - description: 'The role of the messages author, in this case `system`.' - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - ChatCompletionRequestUserMessage: - title: User message - required: - - content - - role - type: object - properties: - content: - oneOf: - - title: Text content - type: string - description: The text contents of the message. 
- - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestUserMessageContentPart' - description: 'An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model.' - description: "The contents of the user message.\n" - x-oaiExpandable: true - role: - enum: - - user - type: string - description: 'The role of the messages author, in this case `user`.' - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - ChatCompletionRequestAssistantMessage: - title: Assistant message + AssistantsNamedToolChoice: required: - - role + - type type: object properties: - content: - oneOf: - - title: Text content - type: string - description: The contents of the assistant message. - - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessageContentPart' - description: 'An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`.' - description: "The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified.\n" - nullable: true - refusal: - type: string - description: The refusal message by the assistant. - nullable: true - role: + type: enum: - - assistant - type: string - description: 'The role of the messages author, in this case `assistant`.' - name: + - function + - code_interpreter + - file_search type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. 
- tool_calls: - $ref: '#/components/schemas/ChatCompletionMessageToolCalls' - function_call: + description: 'The type of the tool. If type is `function`, the function name must be set' + function: required: - - arguments - name type: object properties: - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' name: type: string description: The name of the function to call. - description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' - nullable: true - deprecated: true - FineTuneChatCompletionRequestAssistantMessage: - required: - - role - allOf: - - title: Assistant message - type: object - properties: - weight: - enum: - - 0 - - 1 - type: integer - description: Controls whether the assistant message is trained against (0 or 1) - - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' - ChatCompletionRequestToolMessage: - title: Tool message - required: - - role - - content - - tool_call_id - type: object - properties: - role: - enum: - - tool - type: string - description: 'The role of the messages author, in this case `tool`.' - content: - oneOf: - - title: Text content - type: string - description: The contents of the tool message. - - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestToolMessageContentPart' - description: 'An array of content parts with a defined type. For tool messages, only type `text` is supported.' - description: The contents of the tool message. - tool_call_id: - type: string - description: Tool call that this message is responding to. 
- ChatCompletionRequestFunctionMessage: - title: Function message - required: - - role - - content - - name - type: object - properties: - role: - enum: - - function - type: string - description: 'The role of the messages author, in this case `function`.' - content: - type: string - description: The contents of the function message. - nullable: true - name: - type: string - description: The name of the function to call. - deprecated: true - FunctionParameters: - type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." - ChatCompletionFunctions: - required: - - name - type: object - properties: - description: - type: string - description: 'A description of what the function does, used by the model to choose when and how to call the function.' - name: - type: string - description: 'The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' - parameters: - $ref: '#/components/schemas/FunctionParameters' - deprecated: true - ChatCompletionFunctionCallOption: - required: - - name - type: object - properties: - name: - type: string - description: The name of the function to call. - description: "Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n" - ChatCompletionTool: - required: - - type - - function - type: object - properties: - type: - enum: - - function - type: string - description: 'The type of the tool. Currently, only `function` is supported.' 
- function: - $ref: '#/components/schemas/FunctionObject' - FunctionObject: - required: - - name - type: object - properties: - description: - type: string - description: 'A description of what the function does, used by the model to choose when and how to call the function.' - name: - type: string - description: 'The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' - parameters: - $ref: '#/components/schemas/FunctionParameters' - strict: - type: boolean - description: 'Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling).' - default: false - nullable: true - ResponseFormatText: + description: Specifies a tool the model should use. Use to force the model to call a specific tool. + AudioResponseFormat: + enum: + - json + - text + - srt + - verbose_json + - vtt + type: string + description: "The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.\n" + default: json + AuditLog: required: + - id - type + - effective_at + - actor type: object properties: - type: - enum: - - text + id: type: string - description: 'The type of response format being defined: `text`' - ResponseFormatJsonObject: - required: - - type - type: object - properties: + description: The ID of this log. type: - enum: - - json_object - type: string - description: 'The type of response format being defined: `json_object`' - ResponseFormatJsonSchemaSchema: - type: object - description: 'The schema for the response format, described as a JSON Schema object.' 
- ResponseFormatJsonSchema: - required: - - type - - json_schema - type: object - properties: - type: - enum: - - json_schema - type: string - description: 'The type of response format being defined: `json_schema`' - json_schema: - required: - - type - - name + $ref: '#/components/schemas/AuditLogEventType' + effective_at: + type: integer + description: The Unix timestamp (in seconds) of the event. + project: type: object properties: - description: + id: type: string - description: 'A description of what the response format is for, used by the model to determine how to respond in the format.' + description: The project ID. name: type: string - description: 'The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' - schema: - $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' - strict: - type: boolean - description: 'Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs).' - default: false - nullable: true - ChatCompletionToolChoiceOption: - oneOf: - - enum: - - none - - auto - - required - type: string - description: "`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`required` means the model must call one or more tools.\n" - - $ref: '#/components/schemas/ChatCompletionNamedToolChoice' - description: "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tool and instead generates a message.\n`auto` means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools.\nSpecifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n\n`none` is the default when no tools are present. `auto` is the default if tools are present.\n" - x-oaiExpandable: true - ChatCompletionNamedToolChoice: - required: - - type - - function - type: object - properties: - type: - enum: - - function - type: string - description: 'The type of the tool. Currently, only `function` is supported.' - function: - required: - - name + description: The project title. + description: The project that the action was scoped to. Absent for actions not scoped to projects. + actor: + $ref: '#/components/schemas/AuditLogActor' + api_key.created: type: object properties: - name: + id: type: string - description: The name of the function to call. - description: Specifies a tool the model should use. Use to force the model to call a specific function. - ParallelToolCalls: - type: boolean - description: 'Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use.' - nullable: true - ChatCompletionMessageToolCalls: - type: array - items: - $ref: '#/components/schemas/ChatCompletionMessageToolCall' - description: 'The tool calls generated by the model, such as function calls.' - ChatCompletionMessageToolCall: - required: - - id - - type - - function - type: object - properties: - id: - type: string - description: The ID of the tool call. - type: - enum: - - function - type: string - description: 'The type of the tool. 
Currently, only `function` is supported.' - function: - required: - - name - - arguments + description: The tracking ID of the API key. + data: + type: object + properties: + scopes: + type: array + items: + type: string + description: 'A list of scopes allowed for the API key, e.g. `["api.model.request"]`' + description: The payload used to create the API key. + description: The details for events with this `type`. + api_key.updated: type: object properties: - name: + id: type: string - description: The name of the function to call. - arguments: + description: The tracking ID of the API key. + changes_requested: + type: object + properties: + scopes: + type: array + items: + type: string + description: 'A list of scopes allowed for the API key, e.g. `["api.model.request"]`' + description: The payload used to update the API key. + description: The details for events with this `type`. + api_key.deleted: + type: object + properties: + id: type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - description: The function that the model called. - ChatCompletionMessageToolCallChunk: - required: - - index - type: object - properties: - index: - type: integer - id: - type: string - description: The ID of the tool call. - type: - enum: - - function - type: string - description: 'The type of the tool. Currently, only `function` is supported.' - function: + description: The tracking ID of the API key. + description: The details for events with this `type`. + invite.sent: type: object properties: - name: + id: type: string - description: The name of the function to call. - arguments: + description: The ID of the invite. 
+ data: + type: object + properties: + email: + type: string + description: The email invited to the organization. + role: + type: string + description: The role the email was invited to be. Is either `owner` or `member`. + description: The payload used to create the invite. + description: The details for events with this `type`. + invite.accepted: + type: object + properties: + id: type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - ChatCompletionRole: - enum: - - system - - user - - assistant - - tool - - function - type: string - description: The role of the author of a message - ChatCompletionStreamOptions: - type: object - properties: - include_usage: - type: boolean - description: "If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value.\n" - description: "Options for streaming response. Only set this when you set `stream: true`.\n" - default: - nullable: true - ChatCompletionResponseMessage: - required: - - role - type: object - properties: - content: - type: string - description: The contents of the message. - nullable: true - refusal: - type: string - description: The refusal message generated by the model. - nullable: true - tool_calls: - $ref: '#/components/schemas/ChatCompletionMessageToolCalls' - role: - enum: - - assistant - type: string - description: The role of the author of this message. - function_call: - required: - - name - - arguments + description: The ID of the invite. + description: The details for events with this `type`. 
+ invite.deleted: type: object properties: - arguments: + id: type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - name: + description: The ID of the invite. + description: The details for events with this `type`. + login.failed: + type: object + properties: + error_code: type: string - description: The name of the function to call. - description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' - deprecated: true - description: A chat completion message generated by the model. - ChatCompletionStreamResponseDelta: - type: object - properties: - content: - type: string - description: The contents of the chunk message. - nullable: true - function_call: + description: The error code of the failure. + error_message: + type: string + description: The error message of the failure. + description: The details for events with this `type`. + logout.failed: type: object properties: - arguments: + error_code: type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - name: + description: The error code of the failure. + error_message: type: string - description: The name of the function to call. - description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' 
- deprecated: true - tool_calls: - type: array - items: - $ref: '#/components/schemas/ChatCompletionMessageToolCallChunk' - role: - enum: - - system - - user + description: The error message of the failure. + description: The details for events with this `type`. + organization.updated: + type: object + properties: + id: + type: string + description: The organization ID. + changes_requested: + type: object + properties: + title: + type: string + description: The organization title. + description: + type: string + description: The organization description. + name: + type: string + description: The organization name. + settings: + type: object + properties: + threads_ui_visibility: + type: string + description: 'Visibility of the threads page which shows messages created with the Assistants API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`.' + usage_dashboard_visibility: + type: string + description: Visibility of the usage dashboard which shows activity and costs for your organization. One of `ANY_ROLE` or `OWNERS`. + description: The payload used to update the organization settings. + description: The details for events with this `type`. + project.created: + type: object + properties: + id: + type: string + description: The project ID. + data: + type: object + properties: + name: + type: string + description: The project name. + title: + type: string + description: The title of the project as seen on the dashboard. + description: The payload used to create the project. + description: The details for events with this `type`. + project.updated: + type: object + properties: + id: + type: string + description: The project ID. + changes_requested: + type: object + properties: + title: + type: string + description: The title of the project as seen on the dashboard. + description: The payload used to update the project. + description: The details for events with this `type`. 
+ project.archived: + type: object + properties: + id: + type: string + description: The project ID. + description: The details for events with this `type`. + service_account.created: + type: object + properties: + id: + type: string + description: The service account ID. + data: + type: object + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + description: The payload used to create the service account. + description: The details for events with this `type`. + service_account.updated: + type: object + properties: + id: + type: string + description: The service account ID. + changes_requested: + type: object + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + description: The payload used to updated the service account. + description: The details for events with this `type`. + service_account.deleted: + type: object + properties: + id: + type: string + description: The service account ID. + description: The details for events with this `type`. + user.added: + type: object + properties: + id: + type: string + description: The user ID. + data: + type: object + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + description: The payload used to add the user to the project. + description: The details for events with this `type`. + user.updated: + type: object + properties: + id: + type: string + description: The project ID. + changes_requested: + type: object + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + description: The payload used to update the user. + description: The details for events with this `type`. + user.deleted: + type: object + properties: + id: + type: string + description: The user ID. + description: The details for events with this `type`. 
+ description: A log of a user action or configuration change within this organization. + x-oaiMeta: + name: The audit log object + example: "{\n \"id\": \"req_xxx_20240101\",\n \"type\": \"api_key.created\",\n \"effective_at\": 1720804090,\n \"actor\": {\n \"type\": \"session\",\n \"session\": {\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n },\n \"ip_address\": \"127.0.0.1\",\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n },\n \"api_key.created\": {\n \"id\": \"key_xxxx\",\n \"data\": {\n \"scopes\": [\"resource.operation\"]\n }\n }\n}\n" + AuditLogActor: + type: object + properties: + type: + enum: + - session + - api_key + type: string + description: The type of actor. Is either `session` or `api_key`. + session: + $ref: '#/components/schemas/AuditLogActorSession' + api_key: + $ref: '#/components/schemas/AuditLogActorApiKey' + description: The actor who performed the audit logged action. + AuditLogActorApiKey: + type: object + properties: + id: + type: string + description: The tracking id of the API key. + type: + enum: + - user + - service_account + type: string + description: The type of API key. Can be either `user` or `service_account`. + user: + $ref: '#/components/schemas/AuditLogActorUser' + service_account: + $ref: '#/components/schemas/AuditLogActorServiceAccount' + description: The API Key used to perform the audit logged action. + AuditLogActorServiceAccount: + type: object + properties: + id: + type: string + description: The service account id. + description: The service account that performed the audit logged action. + AuditLogActorSession: + type: object + properties: + user: + $ref: '#/components/schemas/AuditLogActorUser' + ip_address: + type: string + description: The IP address from which the action was performed. + description: The session in which the audit logged action was performed. 
+ AuditLogActorUser: + type: object + properties: + id: + type: string + description: The user id. + email: + type: string + description: The user email. + description: The user who performed the audit logged action. + AuditLogEventType: + enum: + - api_key.created + - api_key.updated + - api_key.deleted + - invite.sent + - invite.accepted + - invite.deleted + - login.succeeded + - login.failed + - logout.succeeded + - logout.failed + - organization.updated + - project.created + - project.updated + - project.archived + - service_account.created + - service_account.updated + - service_account.deleted + - user.added + - user.updated + - user.deleted + type: string + description: The event type. + x-oaiExpandable: true + AutoChunkingStrategyRequestParam: + title: Auto Chunking Strategy + required: + - type + type: object + properties: + type: + enum: + - auto + type: string + description: Always `auto`. + additionalProperties: false + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + Batch: + required: + - id + - object + - endpoint + - input_file_id + - completion_window + - status + - created_at + type: object + properties: + id: + type: string + object: + enum: + - batch + type: string + description: 'The object type, which is always `batch`.' + endpoint: + type: string + description: The OpenAI API endpoint used by the batch. + errors: + type: object + properties: + object: + type: string + description: 'The object type, which is always `list`.' + data: + type: array + items: + type: object + properties: + code: + type: string + description: An error code identifying the error type. + message: + type: string + description: A human-readable message providing more details about the error. + param: + type: string + description: 'The name of the parameter that caused the error, if applicable.' 
+ nullable: true + line: + type: integer + description: 'The line number of the input file where the error occurred, if applicable.' + nullable: true + input_file_id: + type: string + description: The ID of the input file for the batch. + completion_window: + type: string + description: The time frame within which the batch should be processed. + status: + enum: + - validating + - failed + - in_progress + - finalizing + - completed + - expired + - cancelling + - cancelled + type: string + description: The current status of the batch. + output_file_id: + type: string + description: The ID of the file containing the outputs of successfully executed requests. + error_file_id: + type: string + description: The ID of the file containing the outputs of requests with errors. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was created. + in_progress_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started processing. + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch will expire. + finalizing_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started finalizing. + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was completed. + failed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch failed. + expired_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch expired. + cancelling_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started cancelling. + cancelled_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was cancelled. + request_counts: + required: + - total + - completed + - failed + type: object + properties: + total: + type: integer + description: Total number of requests in the batch. 
+ completed: + type: integer + description: Number of requests that have been completed successfully. + failed: + type: integer + description: Number of requests that have failed. + description: The request counts for different statuses within the batch. + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + x-oaiMeta: + name: The batch object + example: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" + BatchRequestInput: + type: object + properties: + custom_id: + type: string + description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. + method: + enum: + - POST + type: string + description: The HTTP method to be used for the request. Currently only `POST` is supported. + url: + type: string + description: 'The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.' 
+ description: The per-line object of the batch input file + x-oaiMeta: + name: The request input object + example: "{\"custom_id\": \"request-1\", \"method\": \"POST\", \"url\": \"/v1/chat/completions\", \"body\": {\"model\": \"gpt-4o-mini\", \"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant.\"}, {\"role\": \"user\", \"content\": \"What is 2+2?\"}]}}\n" + BatchRequestOutput: + type: object + properties: + id: + type: string + custom_id: + type: string + description: A developer-provided per-request id that will be used to match outputs to inputs. + response: + type: object + properties: + status_code: + type: integer + description: The HTTP status code of the response + request_id: + type: string + description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. + body: + type: object + description: The JSON body of the response + x-oaiTypeLabel: map + nullable: true + error: + type: object + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + description: 'For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure.' 
+ nullable: true + description: The per-line object of the batch output and error files + x-oaiMeta: + name: The request output object + example: "{\"id\": \"batch_req_wnaDys\", \"custom_id\": \"request-2\", \"response\": {\"status_code\": 200, \"request_id\": \"req_c187b3\", \"body\": {\"id\": \"chatcmpl-9758Iw\", \"object\": \"chat.completion\", \"created\": 1711475054, \"model\": \"gpt-4o-mini\", \"choices\": [{\"index\": 0, \"message\": {\"role\": \"assistant\", \"content\": \"2 + 2 equals 4.\"}, \"finish_reason\": \"stop\"}], \"usage\": {\"prompt_tokens\": 24, \"completion_tokens\": 15, \"total_tokens\": 39}, \"system_fingerprint\": null}}, \"error\": null}\n" + CancelUploadRequest: + type: object + additionalProperties: false + ChatCompletionFunctionCallOption: + required: + - name + type: object + properties: + name: + type: string + description: The name of the function to call. + description: "Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n" + ChatCompletionFunctions: + required: + - name + type: object + properties: + description: + type: string + description: 'A description of what the function does, used by the model to choose when and how to call the function.' + name: + type: string + description: 'The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' + parameters: + $ref: '#/components/schemas/FunctionParameters' + deprecated: true + ChatCompletionMessageToolCall: + required: + - id + - type + - function + type: object + properties: + id: + type: string + description: The ID of the tool call. + type: + enum: + - function + type: string + description: 'The type of the tool. Currently, only `function` is supported.' + function: + required: + - name + - arguments + type: object + properties: + name: + type: string + description: The name of the function to call. 
+ arguments: + type: string + description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' + description: The function that the model called. + ChatCompletionMessageToolCallChunk: + required: + - index + type: object + properties: + index: + type: integer + id: + type: string + description: The ID of the tool call. + type: + enum: + - function + type: string + description: 'The type of the tool. Currently, only `function` is supported.' + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' + ChatCompletionMessageToolCalls: + type: array + items: + $ref: '#/components/schemas/ChatCompletionMessageToolCall' + description: 'The tool calls generated by the model, such as function calls.' + ChatCompletionModalities: + type: array + items: + enum: + - text + - audio + type: string + description: "Output types that you would like the model to generate for this request.\nMost models are capable of generating text, which is the default:\n\n`[\"text\"]`\n\nThe `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To\nrequest that this model generate both text and audio responses, you can\nuse:\n\n`[\"text\", \"audio\"]`\n" + nullable: true + ChatCompletionNamedToolChoice: + required: + - type + - function + type: object + properties: + type: + enum: + - function + type: string + description: 'The type of the tool. 
Currently, only `function` is supported.' + function: + required: + - name + type: object + properties: + name: + type: string + description: The name of the function to call. + description: Specifies a tool the model should use. Use to force the model to call a specific function. + ChatCompletionRequestAssistantMessage: + title: Assistant message + required: + - role + type: object + properties: + content: + oneOf: + - title: Text content + type: string + description: The contents of the assistant message. + - title: Array of content parts + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestAssistantMessageContentPart' + description: 'An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`.' + description: "The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified.\n" + nullable: true + x-oaiExpandable: true + refusal: + type: string + description: The refusal message by the assistant. + nullable: true + role: + enum: + - assistant + type: string + description: 'The role of the messages author, in this case `assistant`.' + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + audio: + required: + - id + type: object + properties: + id: + type: string + description: "Unique identifier for a previous audio response from the model.\n" + description: "Data about a previous audio response from the model. \n[Learn more](/docs/guides/audio).\n" + nullable: true + x-oaiExpandable: true + tool_calls: + $ref: '#/components/schemas/ChatCompletionMessageToolCalls' + function_call: + required: + - arguments + - name + type: object + properties: + arguments: + type: string + description: 'The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' + name: + type: string + description: The name of the function to call. + description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' + nullable: true + deprecated: true + ChatCompletionRequestAssistantMessageContentPart: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartRefusal' + x-oaiExpandable: true + ChatCompletionRequestFunctionMessage: + title: Function message + required: + - role + - content + - name + type: object + properties: + role: + enum: + - function + type: string + description: 'The role of the messages author, in this case `function`.' + content: + type: string + description: The contents of the function message. + nullable: true + name: + type: string + description: The name of the function to call. + deprecated: true + ChatCompletionRequestMessage: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' + - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' + - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' + - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' + - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' + x-oaiExpandable: true + ChatCompletionRequestMessageContentPartAudio: + title: Audio content part + required: + - type + - input_audio + type: object + properties: + type: + enum: + - input_audio + type: string + description: The type of the content part. Always `input_audio`. + input_audio: + required: + - data + - format + type: object + properties: + data: + type: string + description: Base64 encoded audio data. 
+ format: + enum: + - wav + - mp3 + type: string + description: "The format of the encoded audio data. Currently supports \"wav\" and \"mp3\".\n" + description: "Learn about [audio inputs](/docs/guides/audio).\n" + ChatCompletionRequestMessageContentPartImage: + title: Image content part + required: + - type + - image_url + type: object + properties: + type: + enum: + - image_url + type: string + description: The type of the content part. + image_url: + required: + - url + type: object + properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. + format: uri + detail: + enum: + - auto + - low + - high + type: string + description: 'Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).' + default: auto + description: "Learn about [image inputs](/docs/guides/vision).\n" + ChatCompletionRequestMessageContentPartRefusal: + title: Refusal content part + required: + - type + - refusal + type: object + properties: + type: + enum: + - refusal + type: string + description: The type of the content part. + refusal: + type: string + description: The refusal message generated by the model. + ChatCompletionRequestMessageContentPartText: + title: Text content part + required: + - type + - text + type: object + properties: + type: + enum: + - text + type: string + description: The type of the content part. + text: + type: string + description: The text content. + description: "Learn about [text inputs](/docs/guides/text-generation).\n" + ChatCompletionRequestSystemMessage: + title: System message + required: + - content + - role + type: object + properties: + content: + oneOf: + - title: Text content + type: string + description: The contents of the system message. 
+ - title: Array of content parts + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestSystemMessageContentPart' + description: 'An array of content parts with a defined type. For system messages, only type `text` is supported.' + description: The contents of the system message. + role: + enum: + - system + type: string + description: 'The role of the messages author, in this case `system`.' + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + ChatCompletionRequestSystemMessageContentPart: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + x-oaiExpandable: true + ChatCompletionRequestToolMessage: + title: Tool message + required: + - role + - content + - tool_call_id + type: object + properties: + role: + enum: + - tool + type: string + description: 'The role of the messages author, in this case `tool`.' + content: + oneOf: + - title: Text content + type: string + description: The contents of the tool message. + - title: Array of content parts + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestToolMessageContentPart' + description: 'An array of content parts with a defined type. For tool messages, only type `text` is supported.' + description: The contents of the tool message. + tool_call_id: + type: string + description: Tool call that this message is responding to. + ChatCompletionRequestToolMessageContentPart: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + x-oaiExpandable: true + ChatCompletionRequestUserMessage: + title: User message + required: + - content + - role + type: object + properties: + content: + oneOf: + - title: Text content + type: string + description: The text contents of the message. 
+ - title: Array of content parts + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestUserMessageContentPart' + description: 'An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text, image, or audio inputs.' + description: "The contents of the user message.\n" + x-oaiExpandable: true + role: + enum: + - user + type: string + description: 'The role of the messages author, in this case `user`.' + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + ChatCompletionRequestUserMessageContentPart: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartImage' + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartAudio' + x-oaiExpandable: true + ChatCompletionResponseMessage: + required: + - role + type: object + properties: + content: + type: string + description: The contents of the message. + nullable: true + refusal: + type: string + description: The refusal message generated by the model. + nullable: true + tool_calls: + $ref: '#/components/schemas/ChatCompletionMessageToolCalls' + role: + enum: + - assistant + type: string + description: The role of the author of this message. + function_call: + required: + - name + - arguments + type: object + properties: + arguments: + type: string + description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' + name: + type: string + description: The name of the function to call. + description: 'Deprecated and replaced by `tool_calls`. 
The name and arguments of a function that should be called, as generated by the model.' + deprecated: true + audio: + required: + - id + - expires_at + - data + - transcript + type: object + properties: + id: + type: string + description: Unique identifier for this audio response. + expires_at: + type: integer + description: "The Unix timestamp (in seconds) for when this audio response will\nno longer be accessible on the server for use in multi-turn\nconversations.\n" + data: + type: string + description: "Base64 encoded audio bytes generated by the model, in the format\nspecified in the request.\n" + transcript: + type: string + description: Transcript of the audio generated by the model. + description: "If the audio output modality is requested, this object contains data\nabout the audio response from the model. [Learn more](/docs/guides/audio).\n" + nullable: true + x-oaiExpandable: true + description: A chat completion message generated by the model. + ChatCompletionRole: + enum: + - system + - user + - assistant + - tool + - function + type: string + description: The role of the author of a message + ChatCompletionStreamOptions: + type: object + properties: + include_usage: + type: boolean + description: "If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value.\n" + description: "Options for streaming response. Only set this when you set `stream: true`.\n" + default: + nullable: true + ChatCompletionStreamResponseDelta: + type: object + properties: + content: + type: string + description: The contents of the chunk message. + nullable: true + function_call: + type: object + properties: + arguments: + type: string + description: 'The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' + name: + type: string + description: The name of the function to call. + description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' + deprecated: true + tool_calls: + type: array + items: + $ref: '#/components/schemas/ChatCompletionMessageToolCallChunk' + role: + enum: + - system + - user - assistant - tool type: string - description: The role of the author of this message. - refusal: + description: The role of the author of this message. + refusal: + type: string + description: The refusal message generated by the model. + nullable: true + description: A chat completion delta generated by streamed model responses. + ChatCompletionTokenLogprob: + required: + - token + - logprob + - bytes + - top_logprobs + type: object + properties: + token: + type: string + description: The token. + logprob: + type: number + description: 'The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.' + bytes: + type: array + items: + type: integer + description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. + nullable: true + top_logprobs: + type: array + items: + required: + - token + - logprob + - bytes + type: object + properties: + token: + type: string + description: The token. + logprob: + type: number + description: 'The log probability of this token, if it is within the top 20 most likely tokens. 
Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.' + bytes: + type: array + items: + type: integer + description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. + nullable: true + description: 'List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned.' + ChatCompletionTool: + required: + - type + - function + type: object + properties: + type: + enum: + - function + type: string + description: 'The type of the tool. Currently, only `function` is supported.' + function: + $ref: '#/components/schemas/FunctionObject' + ChatCompletionToolChoiceOption: + oneOf: + - enum: + - none + - auto + - required + type: string + description: "`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools.\n" + - $ref: '#/components/schemas/ChatCompletionNamedToolChoice' + description: "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tool and instead generates a message.\n`auto` means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools.\nSpecifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n\n`none` is the default when no tools are present. 
`auto` is the default if tools are present.\n" + x-oaiExpandable: true + ChunkingStrategyRequestParam: + type: object + oneOf: + - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' + - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' + description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' + x-oaiExpandable: true + CompleteUploadRequest: + required: + - part_ids + type: object + properties: + part_ids: + type: array + items: + type: string + description: "The ordered list of Part IDs.\n" + md5: + type: string + description: "The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect.\n" + additionalProperties: false + CompletionUsage: + required: + - prompt_tokens + - completion_tokens + - total_tokens + type: object + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + completion_tokens_details: + type: object + properties: + audio_tokens: + type: integer + description: Audio input tokens generated by the model. + reasoning_tokens: + type: integer + description: Tokens generated by the model for reasoning. + description: Breakdown of tokens used in a completion. + prompt_tokens_details: + type: object + properties: + audio_tokens: + type: integer + description: Audio input tokens present in the prompt. + cached_tokens: + type: integer + description: Cached tokens present in the prompt. + description: Breakdown of tokens used in the prompt. + description: Usage statistics for the completion request. 
+ CreateAssistantRequest: + required: + - model + type: object + properties: + model: + anyOf: + - type: string + - enum: + - gpt-4o + - gpt-4o-2024-08-06 + - gpt-4o-2024-05-13 + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 + - gpt-4-turbo + - gpt-4-turbo-2024-04-09 + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-0125 + - gpt-3.5-turbo-16k-0613 + type: string + description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" + example: gpt-4o + x-oaiTypeLabel: string + name: + maxLength: 256 + type: string + description: "The name of the assistant. The maximum length is 256 characters.\n" + nullable: true + description: + maxLength: 512 + type: string + description: "The description of the assistant. The maximum length is 512 characters.\n" + nullable: true + instructions: + maxLength: 256000 + type: string + description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" + nullable: true + tools: + maxItems: 128 + type: array + items: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearch' + - $ref: '#/components/schemas/AssistantToolsFunction' + x-oaiExpandable: true + description: "A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. 
Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" + tool_resources: + type: object + properties: + code_interpreter: + type: object + properties: + file_ids: + maxItems: 20 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" + file_search: + type: object + oneOf: + - required: + - vector_store_ids + - required: + - vector_stores + properties: + vector_store_ids: + maxItems: 1 + type: array + items: + type: string + description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" + vector_stores: + maxItems: 1 + type: array + items: + type: object + properties: + file_ids: + maxItems: 10000 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store.\n" + chunking_strategy: + type: object + oneOf: + - title: Auto Chunking Strategy + required: + - type + type: object + properties: + type: + enum: + - auto + type: string + description: Always `auto`. + additionalProperties: false + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + - title: Static Chunking Strategy + required: + - type + - static + type: object + properties: + type: + enum: + - static + type: string + description: Always `static`. + static: + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + type: object + properties: + max_chunk_size_tokens: + maximum: 4096 + minimum: 100 + type: integer + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. 
+ chunk_overlap_tokens: + type: integer + description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" + additionalProperties: false + additionalProperties: false + description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' + x-oaiExpandable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + x-oaiTypeLabel: map + description: "A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" + description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" + nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + temperature: + maximum: 2 + minimum: 0 + type: number + description: "What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" + default: 1 + nullable: true + example: 1 + top_p: + maximum: 1 + minimum: 0 + type: number + description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" + default: 1 + nullable: true + example: 1 + response_format: + $ref: '#/components/schemas/AssistantsApiResponseFormatOption' + additionalProperties: false + CreateChatCompletionFunctionResponse: + required: + - choices + - created + - id + - model + - object + type: object + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + items: + required: + - finish_reason + - index + - message + - logprobs + type: object + properties: + finish_reason: + enum: + - stop + - length + - function_call + - content_filter + type: string + description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function.\n" + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: '#/components/schemas/ChatCompletionResponseMessage' + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: + type: string + description: The model used for the chat completion. 
+ system_fingerprint: + type: string + description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" + object: + enum: + - chat.completion + type: string + description: 'The object type, which is always `chat.completion`.' + usage: + $ref: '#/components/schemas/CompletionUsage' + description: 'Represents a chat completion response returned by model, based on the provided input.' + x-oaiMeta: + name: The chat completion object + group: chat + example: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" + CreateChatCompletionImageResponse: + type: object + description: 'Represents a streamed chunk of a chat completion response returned by model, based on the provided input.' 
+ x-oaiMeta: + name: The chat completion chunk object + group: chat + example: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" + CreateChatCompletionRequest: + required: + - model + - messages + type: object + properties: + messages: + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestMessage' + description: "A list of messages comprising the conversation so far. Depending on the\n[model](/docs/models) you use, different message types (modalities) are\nsupported, like [text](/docs/guides/text-generation),\n[images](/docs/guides/vision), and [audio](/docs/guides/audio).\n" + model: + anyOf: + - type: string + - enum: + - o1-preview + - o1-preview-2024-09-12 + - o1-mini + - o1-mini-2024-09-12 + - gpt-4o + - gpt-4o-2024-08-06 + - gpt-4o-2024-05-13 + - gpt-4o-realtime-preview + - gpt-4o-realtime-preview-2024-10-01 + - gpt-4o-audio-preview + - gpt-4o-audio-preview-2024-10-01 + - chatgpt-4o-latest + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 + - gpt-4-turbo + - gpt-4-turbo-2024-04-09 + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0301 + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-0125 + - gpt-3.5-turbo-16k-0613 + type: string + description: 'ID of the model to use. 
See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.' + example: gpt-4o + x-oaiTypeLabel: string + store: + type: boolean + description: "Whether or not to store the output of this chat completion request\nfor use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.\n" + default: false + nullable: true + metadata: + type: object + additionalProperties: + type: string + description: "Developer-defined tags and values used for filtering completions\nin the [dashboard](https://platform.openai.com/chat-completions).\n" + nullable: true + frequency_penalty: + maximum: 2 + minimum: -2 + type: number + description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" + default: 0 + nullable: true + logit_bias: + type: object + additionalProperties: + type: integer + description: "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n" + default: + nullable: true + x-oaiTypeLabel: map + logprobs: + type: boolean + description: 'Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.' 
+ default: false + nullable: true + top_logprobs: + maximum: 20 + minimum: 0 + type: integer + description: 'An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.' + nullable: true + max_tokens: + type: integer + description: "The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API.\n\nThis value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o1 series models](/docs/guides/reasoning).\n" + nullable: true + deprecated: true + max_completion_tokens: + type: integer + description: "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning).\n" + nullable: true + n: + maximum: 128 + minimum: 1 + type: integer + description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + default: 1 + nullable: true + example: 1 + modalities: + $ref: '#/components/schemas/ChatCompletionModalities' + audio: + required: + - voice + - format + type: object + properties: + voice: + enum: + - alloy + - echo + - fable + - onyx + - nova + - shimmer + type: string + description: "Specifies the voice type. Supported voices are `alloy`, `echo`, \n`fable`, `onyx`, `nova`, and `shimmer`.\n" + format: + enum: + - wav + - mp3 + - flac + - opus + - pcm16 + type: string + description: "Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,\n`opus`, or `pcm16`. \n" + description: "Parameters for audio output. Required when audio output is requested with\n`modalities: [\"audio\"]`. 
[Learn more](/docs/guides/audio).\n" + nullable: true + x-oaiExpandable: true + presence_penalty: + maximum: 2 + minimum: -2 + type: number + description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" + default: 0 + nullable: true + response_format: + oneOf: + - $ref: '#/components/schemas/ResponseFormatText' + - $ref: '#/components/schemas/ResponseFormatJsonObject' + - $ref: '#/components/schemas/ResponseFormatJsonSchema' + description: "An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. 
Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" + x-oaiExpandable: true + seed: + maximum: 9223372036854776000 + minimum: -9223372036854776000 + type: integer + description: "This feature is in Beta.\nIf specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n" + nullable: true + x-oaiMeta: + beta: true + service_tier: + enum: + - auto + - default + type: string + description: "Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:\n - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. \n - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\n - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\n - When not set, the default behavior is 'auto'.\n\n When this parameter is set, the response body will include the `service_tier` utilized.\n" + default: auto + nullable: true + stop: + oneOf: + - type: string + nullable: true + - maxItems: 4 + minItems: 1 + type: array + items: + type: string + description: "Up to 4 sequences where the API will stop generating further tokens.\n" + default: + stream: + type: boolean + description: "If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n" + default: false + nullable: true + stream_options: + $ref: '#/components/schemas/ChatCompletionStreamOptions' + temperature: + maximum: 2 + minimum: 0 + type: number + description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n" + default: 1 + nullable: true + example: 1 + top_p: + maximum: 1 + minimum: 0 + type: number + description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n" + default: 1 + nullable: true + example: 1 + tools: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTool' + description: "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.\n" + tool_choice: + $ref: '#/components/schemas/ChatCompletionToolChoiceOption' + parallel_tool_calls: + $ref: '#/components/schemas/ParallelToolCalls' + user: + type: string + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
[Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" + example: user-1234 + function_call: + oneOf: + - enum: + - none + - auto + type: string + description: "`none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function.\n" + - $ref: '#/components/schemas/ChatCompletionFunctionCallOption' + description: "Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n`none` means the model will not call a function and instead generates a message.\n`auto` means the model can pick between generating a message or calling a function.\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n\n`none` is the default when no functions are present. `auto` is the default if functions are present.\n" + deprecated: true + x-oaiExpandable: true + functions: + maxItems: 128 + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionFunctions' + description: "Deprecated in favor of `tools`.\n\nA list of functions the model may generate JSON inputs for.\n" + deprecated: true + CreateChatCompletionResponse: + required: + - choices + - created + - id + - model + - object + type: object + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + items: + required: + - finish_reason + - index + - message + type: object + properties: + finish_reason: + enum: + - stop + - length + - tool_calls + - content_filter + - function_call + type: string + description: "The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n" + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: '#/components/schemas/ChatCompletionResponseMessage' + logprobs: + required: + - content + - refusal + type: object + properties: + content: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTokenLogprob' + description: A list of message content tokens with log probability information. + nullable: true + refusal: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTokenLogprob' + description: A list of message refusal tokens with log probability information. + nullable: true + description: Log probability information for the choice. + nullable: true + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: + type: string + description: The model used for the chat completion. + service_tier: + enum: + - scale + - default + type: string + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. + nullable: true + example: scale + system_fingerprint: + type: string + description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" + object: + enum: + - chat.completion + type: string + description: 'The object type, which is always `chat.completion`.' 
+ usage: + $ref: '#/components/schemas/CompletionUsage' + description: 'Represents a chat completion response returned by model, based on the provided input.' + x-oaiMeta: + name: The chat completion object + group: chat + example: "{\n \"id\": \"chatcmpl-123456\",\n \"object\": \"chat.completion\",\n \"created\": 1728933352,\n \"model\": \"gpt-4o-2024-08-06\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hi there! How can I assist you today?\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 19,\n \"completion_tokens\": 10,\n \"total_tokens\": 29,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_6b68a8204b\"\n}\n" + CreateChatCompletionStreamResponse: + required: + - choices + - created + - id + - model + - object + type: object + properties: + id: + type: string + description: A unique identifier for the chat completion. Each chunk has the same ID. + choices: + type: array + items: + required: + - delta + - finish_reason + - index + type: object + properties: + delta: + $ref: '#/components/schemas/ChatCompletionStreamResponseDelta' + logprobs: + required: + - content + - refusal + type: object + properties: + content: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTokenLogprob' + description: A list of message content tokens with log probability information. + nullable: true + refusal: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTokenLogprob' + description: A list of message refusal tokens with log probability information. + nullable: true + description: Log probability information for the choice. + nullable: true + finish_reason: + enum: + - stop + - length + - tool_calls + - content_filter + - function_call + type: string + description: "The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n" + nullable: true + index: + type: integer + description: The index of the choice in the list of choices. + description: "A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the\nlast chunk if you set `stream_options: {\"include_usage\": true}`.\n" + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + model: + type: string + description: The model to generate the completion. + service_tier: + enum: + - scale + - default type: string - description: The refusal message generated by the model. + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. nullable: true - description: A chat completion delta generated by streamed model responses. - CreateChatCompletionRequest: + example: scale + system_fingerprint: + type: string + description: "This fingerprint represents the backend configuration that the model runs with.\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" + object: + enum: + - chat.completion.chunk + type: string + description: 'The object type, which is always `chat.completion.chunk`.' + usage: + required: + - prompt_tokens + - completion_tokens + - total_tokens + type: object + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. 
+ prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + description: "An optional field that will only be present when you set `stream_options: {\"include_usage\": true}` in your request.\nWhen present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n" + description: 'Represents a streamed chunk of a chat completion response returned by model, based on the provided input.' + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: "{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"role\":\"assistant\",\"content\":\"\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n....\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{},\"logprobs\":null,\"finish_reason\":\"stop\"}]}\n" + CreateCompletionRequest: required: - model - - messages + - prompt type: object properties: - messages: - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestMessage' - description: 'A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).' 
model: anyOf: - type: string - enum: - - o1-preview - - o1-preview-2024-09-12 - - o1-mini - - o1-mini-2024-09-12 - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - chatgpt-4o-latest - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0301 - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 + - gpt-3.5-turbo-instruct + - davinci-002 + - babbage-002 type: string - description: 'ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.' - example: gpt-4o + description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" x-oaiTypeLabel: string + prompt: + oneOf: + - type: string + default: '' + example: This is a test. + - type: array + items: + type: string + default: '' + example: This is a test. 
+ - minItems: 1 + type: array + items: + type: integer + example: '[1212, 318, 257, 1332, 13]' + - minItems: 1 + type: array + items: + minItems: 1 + type: array + items: + type: integer + example: '[[1212, 318, 257, 1332, 13]]' + description: "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\nNote that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.\n" + default: <|endoftext|> + nullable: true + best_of: + maximum: 20 + minimum: 0 + type: integer + description: "Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed.\n\nWhen used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n" + default: 1 + nullable: true + echo: + type: boolean + description: "Echo back the prompt in addition to the completion\n" + default: false + nullable: true frequency_penalty: maximum: 2 minimum: -2 @@ -4456,35 +5718,29 @@ components: type: object additionalProperties: type: integer - description: "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n" + description: "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n\nAs an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.\n" default: nullable: true x-oaiTypeLabel: map logprobs: - type: boolean - description: 'Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.' - default: false - nullable: true - top_logprobs: - maximum: 20 + maximum: 5 minimum: 0 type: integer - description: 'An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.' + description: "Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. 
The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.\n\nThe maximum value for `logprobs` is 5.\n" + default: nullable: true max_tokens: + minimum: 0 type: integer - description: "The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API.\n\nThis value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o1 series models](/docs/guides/reasoning).\n" - nullable: true - deprecated: true - max_completion_tokens: - type: integer - description: "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning).\n" + description: "The maximum number of [tokens](/tokenizer) that can be generated in the completion.\n\nThe token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" + default: 16 nullable: true + example: 16 n: maximum: 128 minimum: 1 type: integer - description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + description: "How many completions to generate for each prompt.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n" default: 1 nullable: true example: 1 @@ -4495,47 +5751,40 @@ components: description: "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" default: 0 nullable: true - response_format: - oneOf: - - $ref: '#/components/schemas/ResponseFormatText' - - $ref: '#/components/schemas/ResponseFormatJsonObject' - - $ref: '#/components/schemas/ResponseFormatJsonSchema' - description: "An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. 
Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" - x-oaiExpandable: true seed: - maximum: 9223372036854775807 - minimum: -9223372036854775808 + maximum: 9223372036854776000 + minimum: -9223372036854776000 type: integer - description: "This feature is in Beta.\nIf specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n" - nullable: true - x-oaiMeta: - beta: true - service_tier: - enum: - - auto - - default - type: string - description: "Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:\n - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. 
\n - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\n - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\n - When not set, the default behavior is 'auto'.\n\n When this parameter is set, the response body will include the `service_tier` utilized.\n" - default: + description: "If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\n\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n" nullable: true stop: oneOf: - type: string + default: <|endoftext|> nullable: true + example: "\n" - maxItems: 4 minItems: 1 type: array items: type: string - description: "Up to 4 sequences where the API will stop generating further tokens.\n" + example: '["\n"]' + description: "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.\n" default: + nullable: true stream: type: boolean - description: "If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n" + description: "Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. 
[Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n" default: false nullable: true stream_options: $ref: '#/components/schemas/ChatCompletionStreamOptions' + suffix: + type: string + description: "The suffix that comes after a completion of inserted text.\n\nThis parameter is only supported for `gpt-3.5-turbo-instruct`.\n" + default: + nullable: true + example: test. temperature: maximum: 2 minimum: 0 @@ -4552,347 +5801,388 @@ components: default: 1 nullable: true example: 1 - tools: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTool' - description: "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.\n" - tool_choice: - $ref: '#/components/schemas/ChatCompletionToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' user: type: string description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" example: user-1234 - function_call: - oneOf: - - enum: - - none - - auto - type: string - description: "`none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function.\n" - - $ref: '#/components/schemas/ChatCompletionFunctionCallOption' - description: "Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n`none` means the model will not call a function and instead generates a message.\n`auto` means the model can pick between generating a message or calling a function.\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n\n`none` is the default when no functions are present. 
`auto` is the default if functions are present.\n" - deprecated: true - x-oaiExpandable: true - functions: - maxItems: 128 - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionFunctions' - description: "Deprecated in favor of `tools`.\n\nA list of functions the model may generate JSON inputs for.\n" - deprecated: true - CreateChatCompletionResponse: + CreateCompletionResponse: required: - - choices - - created - id - - model - object + - created + - model + - choices type: object properties: id: type: string - description: A unique identifier for the chat completion. + description: A unique identifier for the completion. choices: type: array items: required: - finish_reason - index - - message + - logprobs + - text type: object properties: finish_reason: enum: - stop - length - - tool_calls - content_filter - - function_call type: string - description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n" + description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\nor `content_filter` if content was omitted due to a flag from our content filters.\n" index: type: integer - description: The index of the choice in the list of choices. 
- message: - $ref: '#/components/schemas/ChatCompletionResponseMessage' logprobs: - required: - - content - - refusal type: object properties: - content: + text_offset: type: array items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message content tokens with log probability information. - nullable: true - refusal: + type: integer + token_logprobs: type: array items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message refusal tokens with log probability information. - nullable: true - description: Log probability information for the choice. + type: number + tokens: + type: array + items: + type: string + top_logprobs: + type: array + items: + type: object + additionalProperties: + type: number nullable: true - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + text: + type: string + description: The list of completion choices the model generated for the input prompt. created: type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. + description: The Unix timestamp (in seconds) of when the completion was created. model: type: string - description: The model used for the chat completion. - service_tier: - enum: - - scale - - default - type: string - description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. - nullable: true - example: scale + description: The model used for completion. system_fingerprint: type: string description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" object: enum: - - chat.completion + - text_completion type: string - description: 'The object type, which is always `chat.completion`.' 
+ description: 'The object type, which is always "text_completion"' usage: $ref: '#/components/schemas/CompletionUsage' - description: 'Represents a chat completion response returned by model, based on the provided input.' + description: "Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint).\n" x-oaiMeta: - name: The chat completion object - group: chat - example: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nHello there, how may I assist you today?\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" - CreateChatCompletionFunctionResponse: + name: The completion object + legacy: true + example: "{\n \"id\": \"cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7\",\n \"object\": \"text_completion\",\n \"created\": 1589478378,\n \"model\": \"gpt-4-turbo\",\n \"choices\": [\n {\n \"text\": \"\\n\\nThis is indeed a test\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": \"length\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 5,\n \"completion_tokens\": 7,\n \"total_tokens\": 12\n }\n}\n" + CreateEmbeddingRequest: required: - - choices - - created - - id - model - - object + - input type: object properties: - id: - type: string - description: A unique identifier for the chat completion. 
- choices: - type: array - items: - required: - - finish_reason - - index - - message - - logprobs - type: object - properties: - finish_reason: - enum: - - stop - - length - - function_call - - content_filter + input: + oneOf: + - title: string + type: string + description: The string that will be turned into an embedding. + default: '' + example: This is a test. + - title: array + maxItems: 2048 + minItems: 1 + type: array + items: type: string - description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function.\n" - index: + default: '' + example: '[''This is a test.'']' + description: The array of strings that will be turned into an embedding. + - title: array + maxItems: 2048 + minItems: 1 + type: array + items: type: integer - description: The index of the choice in the list of choices. - message: - $ref: '#/components/schemas/ChatCompletionResponseMessage' - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. + description: The array of integers that will be turned into an embedding. + example: '[1212, 318, 257, 1332, 13]' + - title: array + maxItems: 2048 + minItems: 1 + type: array + items: + minItems: 1 + type: array + items: + type: integer + description: The array of arrays containing integers that will be turned into an embedding. + example: '[[1212, 318, 257, 1332, 13]]' + description: "Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. 
The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" + example: The quick brown fox jumped over the lazy dog + x-oaiExpandable: true model: - type: string - description: The model used for the chat completion. - system_fingerprint: - type: string - description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" - object: + anyOf: + - type: string + - enum: + - text-embedding-ada-002 + - text-embedding-3-small + - text-embedding-3-large + type: string + description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" + example: text-embedding-3-small + x-oaiTypeLabel: string + encoding_format: enum: - - chat.completion + - float + - base64 type: string - description: 'The object type, which is always `chat.completion`.' - usage: - $ref: '#/components/schemas/CompletionUsage' - description: 'Represents a chat completion response returned by model, based on the provided input.' 
- x-oaiMeta: - name: The chat completion object - group: chat - example: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" - ChatCompletionTokenLogprob: + description: 'The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).' + default: float + example: float + dimensions: + minimum: 1 + type: integer + description: "The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models.\n" + nullable: true + user: + type: string + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" + example: user-1234 + additionalProperties: false + CreateEmbeddingResponse: required: - - token - - logprob - - bytes - - top_logprobs + - object + - model + - data + - usage type: object properties: - token: - type: string - description: The token. - logprob: - type: number - description: 'The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.' - bytes: - type: array - items: - type: integer - description: A list of integers representing the UTF-8 bytes representation of the token. 
Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - nullable: true - top_logprobs: + data: type: array items: - required: - - token - - logprob - - bytes - type: object - properties: - token: - type: string - description: The token. - logprob: - type: number - description: 'The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.' - bytes: - type: array - items: - type: integer - description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - nullable: true - description: 'List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned.' - ListPaginatedFineTuningJobsResponse: - required: - - object - - data - - has_more + $ref: '#/components/schemas/Embedding' + description: The list of embeddings generated by the model. + model: + type: string + description: The name of the model used to generate the embedding. + object: + enum: + - list + type: string + description: 'The object type, which is always "list".' + usage: + required: + - prompt_tokens + - total_tokens + type: object + properties: + prompt_tokens: + type: integer + description: The number of tokens used by the prompt. + total_tokens: + type: integer + description: The total number of tokens used by the request. + description: The usage information for the request. 
+ CreateFileRequest: + required: + - file + - purpose type: object properties: - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJob' - has_more: - type: boolean - object: + file: + type: string + description: "The File object (not file name) to be uploaded.\n" + format: binary + purpose: enum: - - list + - assistants + - batch + - fine-tune + - vision type: string - CreateChatCompletionStreamResponse: + description: "The intended purpose of the uploaded file.\n\nUse \"assistants\" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, \"vision\" for Assistants image file inputs, \"batch\" for [Batch API](/docs/guides/batch), and \"fine-tune\" for [Fine-tuning](/docs/api-reference/fine-tuning).\n" + additionalProperties: false + CreateFineTuningJobRequest: required: - - choices - - created - - id - model - - object + - training_file type: object properties: - id: + model: + anyOf: + - type: string + - enum: + - babbage-002 + - davinci-002 + - gpt-3.5-turbo + - gpt-4o-mini + type: string + description: "The name of the model to fine-tune. You can select one of the\n[supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).\n" + example: gpt-4o-mini + x-oaiTypeLabel: string + training_file: type: string - description: A unique identifier for the chat completion. Each chunk has the same ID. - choices: + description: "The ID of an uploaded file that contains training data.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour dataset must be formatted as a JSONL file. 
Additionally, you must upload your file with the purpose `fine-tune`.\n\nThe contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format.\n\nSee the [fine-tuning guide](/docs/guides/fine-tuning) for more details.\n" + example: file-abc123 + hyperparameters: + type: object + properties: + batch_size: + oneOf: + - enum: + - auto + type: string + - maximum: 256 + minimum: 1 + type: integer + description: "Number of examples in each batch. A larger batch size means that model parameters\nare updated less frequently, but with lower variance.\n" + default: auto + learning_rate_multiplier: + oneOf: + - enum: + - auto + type: string + - minimum: 0 + exclusiveMinimum: true + type: number + description: "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid\noverfitting.\n" + default: auto + n_epochs: + oneOf: + - enum: + - auto + type: string + - maximum: 50 + minimum: 1 + type: integer + description: "The number of epochs to train the model for. An epoch refers to one full cycle\nthrough the training dataset.\n" + default: auto + description: The hyperparameters used for the fine-tuning job. + suffix: + maxLength: 64 + minLength: 1 + type: string + description: "A string of up to 64 characters that will be added to your fine-tuned model name.\n\nFor example, a `suffix` of \"custom-model-name\" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.\n" + default: + nullable: true + validation_file: + type: string + description: "The ID of an uploaded file that contains validation data.\n\nIf you provide this file, the data is used to generate validation\nmetrics periodically during fine-tuning. These metrics can be viewed in\nthe fine-tuning results file.\nThe same data should not be present in both train and validation files.\n\nYour dataset must be formatted as a JSONL file. 
You must upload your file with the purpose `fine-tune`.\n\nSee the [fine-tuning guide](/docs/guides/fine-tuning) for more details.\n" + nullable: true + example: file-abc123 + integrations: type: array items: required: - - delta - - finish_reason - - index + - type + - wandb type: object properties: - delta: - $ref: '#/components/schemas/ChatCompletionStreamResponseDelta' - logprobs: + type: + oneOf: + - enum: + - wandb + type: string + description: "The type of integration to enable. Currently, only \"wandb\" (Weights and Biases) is supported.\n" + wandb: required: - - content - - refusal + - project type: object properties: - content: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message content tokens with log probability information. + project: + type: string + description: "The name of the project that the new run will be created under.\n" + example: my-wandb-project + name: + type: string + description: "A display name to set for the run. If not set, we will use the Job ID as the name.\n" nullable: true - refusal: + entity: + type: string + description: "The entity to use for the run. This allows you to set the team or username of the WandB user that you would\nlike associated with the run. If not set, the default entity for the registered WandB API key is used.\n" + nullable: true + tags: type: array items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message refusal tokens with log probability information. - nullable: true - description: Log probability information for the choice. - nullable: true - finish_reason: - enum: - - stop - - length - - tool_calls - - content_filter - - function_call - type: string - description: "The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n" - nullable: true - index: - type: integer - description: The index of the choice in the list of choices. - description: "A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the\nlast chunk if you set `stream_options: {\"include_usage\": true}`.\n" - created: + type: string + example: custom-tag + description: "A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some\ndefault tags are generated by OpenAI: \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n" + description: "The settings for your integration with Weights and Biases. This payload specifies the project that\nmetrics will be sent to. Optionally, you can set an explicit display name for your run, add tags\nto your run, and set a default entity (team, username, etc) to be associated with your run.\n" + description: A list of integrations to enable for your fine-tuning job. + nullable: true + seed: + maximum: 2147483647 + minimum: 0 type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - model: + description: "The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases.\nIf a seed is not specified, one will be generated for you.\n" + nullable: true + example: 42 + CreateImageEditRequest: + required: + - prompt + - image + type: object + properties: + image: type: string - description: The model to generate the completion. 
- service_tier: - enum: - - scale - - default + description: 'The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.' + format: binary + prompt: type: string - description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. + description: A text description of the desired image(s). The maximum length is 1000 characters. + example: A cute baby sea otter wearing a beret + mask: + type: string + description: 'An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.' + format: binary + model: + anyOf: + - type: string + - enum: + - dall-e-2 + type: string + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + default: dall-e-2 nullable: true - example: scale - system_fingerprint: + example: dall-e-2 + x-oaiTypeLabel: string + n: + maximum: 10 + minimum: 1 + type: integer + description: The number of images to generate. Must be between 1 and 10. + default: 1 + nullable: true + example: 1 + size: + enum: + - 256x256 + - 512x512 + - 1024x1024 type: string - description: "This fingerprint represents the backend configuration that the model runs with.\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" - object: + description: 'The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.' + default: 1024x1024 + nullable: true + example: 1024x1024 + response_format: enum: - - chat.completion.chunk + - url + - b64_json type: string - description: 'The object type, which is always `chat.completion.chunk`.' 
- usage: - required: - - prompt_tokens - - completion_tokens - - total_tokens - type: object - properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - total_tokens: - type: integer - description: Total number of tokens used in the request (prompt + completion). - description: "An optional field that will only be present when you set `stream_options: {\"include_usage\": true}` in your request.\nWhen present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n" - description: 'Represents a streamed chunk of a chat completion response returned by model, based on the provided input.' - x-oaiMeta: - name: The chat completion chunk object - group: chat - example: "{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"role\":\"assistant\",\"content\":\"\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n....\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{},\"logprobs\":null,\"finish_reason\":\"stop\"}]}\n" - CreateChatCompletionImageResponse: - type: object - description: 'Represents a streamed chunk of a chat completion response returned by model, based on the provided input.' 
- x-oaiMeta: - name: The chat completion chunk object - group: chat - example: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n }\n }\n}\n" + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. + default: url + nullable: true + example: url + user: + type: string + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" + example: user-1234 CreateImageRequest: required: - prompt @@ -4964,93 +6254,6 @@ components: type: string description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" example: user-1234 - ImagesResponse: - required: - - created - - data - properties: - created: - type: integer - data: - type: array - items: - $ref: '#/components/schemas/Image' - Image: - type: object - properties: - b64_json: - type: string - description: 'The base64-encoded JSON of the generated image, if `response_format` is `b64_json`.' - url: - type: string - description: 'The URL of the generated image, if `response_format` is `url` (default).' - revised_prompt: - type: string - description: 'The prompt that was used to generate the image, if there was any revision to the prompt.' 
- description: Represents the url or the content of an image generated by the OpenAI API. - x-oaiMeta: - name: The image object - example: "{\n \"url\": \"...\",\n \"revised_prompt\": \"...\"\n}\n" - CreateImageEditRequest: - required: - - prompt - - image - type: object - properties: - image: - type: string - description: 'The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.' - format: binary - prompt: - type: string - description: A text description of the desired image(s). The maximum length is 1000 characters. - example: A cute baby sea otter wearing a beret - mask: - type: string - description: 'An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.' - format: binary - model: - anyOf: - - type: string - - enum: - - dall-e-2 - type: string - description: The model to use for image generation. Only `dall-e-2` is supported at this time. - default: dall-e-2 - nullable: true - example: dall-e-2 - x-oaiTypeLabel: string - n: - maximum: 10 - minimum: 1 - type: integer - description: The number of images to generate. Must be between 1 and 10. - default: 1 - nullable: true - example: 1 - size: - enum: - - 256x256 - - 512x512 - - 1024x1024 - type: string - description: 'The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.' - default: 1024x1024 - nullable: true - example: 1024x1024 - response_format: - enum: - - url - - b64_json - type: string - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. 
- default: url - nullable: true - example: url - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 CreateImageVariationRequest: required: - image @@ -5102,6 +6305,61 @@ components: type: string description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" example: user-1234 + CreateMessageRequest: + required: + - role + - content + type: object + properties: + role: + enum: + - user + - assistant + type: string + description: "The role of the entity that is creating the message. Allowed values include:\n- `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.\n- `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation.\n" + content: + oneOf: + - title: Text content + type: string + description: The text contents of the message. + - title: Array of content parts + minItems: 1 + type: array + items: + oneOf: + - $ref: '#/components/schemas/MessageContentImageFileObject' + - $ref: '#/components/schemas/MessageContentImageUrlObject' + - $ref: '#/components/schemas/MessageRequestContentTextObject' + x-oaiExpandable: true + description: 'An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview).' + x-oaiExpandable: true + attachments: + required: + - file_id + - tools + type: array + items: + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. 
+ tools: + type: array + items: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearchTypeOnly' + x-oaiExpandable: true + description: The tools to add this file to. + description: 'A list of files attached to the message, and the tools they should be added to.' + nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + additionalProperties: false CreateModerationRequest: required: - input @@ -5435,367 +6693,391 @@ components: description: Represents if a given text input is potentially harmful. x-oaiMeta: name: The moderation object - example: empty - ListFilesResponse: - required: - - object - - data - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - object: - enum: - - list - type: string - CreateFileRequest: - required: - - file - - purpose - type: object - properties: - file: - type: string - description: "The File object (not file name) to be uploaded.\n" - format: binary - purpose: - enum: - - assistants - - batch - - fine-tune - - vision - type: string - description: "The intended purpose of the uploaded file.\n\nUse \"assistants\" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, \"vision\" for Assistants image file inputs, \"batch\" for [Batch API](/docs/guides/batch), and \"fine-tune\" for [Fine-tuning](/docs/api-reference/fine-tuning).\n" - additionalProperties: false - DeleteFileResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - object: - enum: - - file - type: string - deleted: - type: boolean - CreateUploadRequest: - required: - - filename - - 
purpose - - bytes - - mime_type - type: object - properties: - filename: - type: string - description: "The name of the file to upload.\n" - purpose: - enum: - - assistants - - batch - - fine-tune - - vision - type: string - description: "The intended purpose of the uploaded file.\n\nSee the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).\n" - bytes: - type: integer - description: "The number of bytes in the file you are uploading.\n" - mime_type: - type: string - description: "The MIME type of the file.\n\nThis must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision.\n" - additionalProperties: false - AddUploadPartRequest: - required: - - data - type: object - properties: - data: - type: string - description: "The chunk of bytes for this Part.\n" - format: binary - additionalProperties: false - CompleteUploadRequest: + example: "{\n \"id\": \"modr-0d9740456c391e43c445bf0f010940c7\",\n \"model\": \"omni-moderation-latest\",\n \"results\": [\n {\n \"flagged\": true,\n \"categories\": {\n \"harassment\": true,\n \"harassment/threatening\": true,\n \"sexual\": false,\n \"hate\": false,\n \"hate/threatening\": false,\n \"illicit\": false,\n \"illicit/violent\": false,\n \"self-harm/intent\": false,\n \"self-harm/instructions\": false,\n \"self-harm\": false,\n \"sexual/minors\": false,\n \"violence\": true,\n \"violence/graphic\": true\n },\n \"category_scores\": {\n \"harassment\": 0.8189693396524255,\n \"harassment/threatening\": 0.804985420696006,\n \"sexual\": 1.573112165348997e-6,\n \"hate\": 0.007562942636942845,\n \"hate/threatening\": 0.004208854591835476,\n \"illicit\": 0.030535955153511665,\n \"illicit/violent\": 0.008925306722380033,\n \"self-harm/intent\": 0.00023023930975076432,\n \"self-harm/instructions\": 0.0002293869201073356,\n \"self-harm\": 0.012598046106750154,\n \"sexual/minors\": 2.212566909570261e-8,\n \"violence\": 0.9999992735124786,\n 
\"violence/graphic\": 0.843064871157054\n },\n \"category_applied_input_types\": {\n \"harassment\": [\n \"text\"\n ],\n \"harassment/threatening\": [\n \"text\"\n ],\n \"sexual\": [\n \"text\",\n \"image\"\n ],\n \"hate\": [\n \"text\"\n ],\n \"hate/threatening\": [\n \"text\"\n ],\n \"illicit\": [\n \"text\"\n ],\n \"illicit/violent\": [\n \"text\"\n ],\n \"self-harm/intent\": [\n \"text\",\n \"image\"\n ],\n \"self-harm/instructions\": [\n \"text\",\n \"image\"\n ],\n \"self-harm\": [\n \"text\",\n \"image\"\n ],\n \"sexual/minors\": [\n \"text\"\n ],\n \"violence\": [\n \"text\",\n \"image\"\n ],\n \"violence/graphic\": [\n \"text\",\n \"image\"\n ]\n }\n }\n ]\n}\n" + CreateRunRequest: required: - - part_ids + - assistant_id type: object properties: - part_ids: - type: array - items: - type: string - description: "The ordered list of Part IDs.\n" - md5: + assistant_id: type: string - description: "The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect.\n" - additionalProperties: false - CancelUploadRequest: - type: object - additionalProperties: false - CreateFineTuningJobRequest: - required: - - model - - training_file - type: object - properties: + description: 'The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.' model: anyOf: - type: string - enum: - - babbage-002 - - davinci-002 - - gpt-3.5-turbo + - gpt-4o + - gpt-4o-2024-08-06 + - gpt-4o-2024-05-13 - gpt-4o-mini + - gpt-4o-mini-2024-07-18 + - gpt-4-turbo + - gpt-4-turbo-2024-04-09 + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-0125 + - gpt-3.5-turbo-16k-0613 type: string - description: "The name of the model to fine-tune. 
You can select one of the\n[supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).\n" - example: gpt-4o-mini + description: 'The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.' + nullable: true + example: gpt-4o x-oaiTypeLabel: string - training_file: - type: string - description: "The ID of an uploaded file that contains training data.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.\n\nThe contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format.\n\nSee the [fine-tuning guide](/docs/guides/fine-tuning) for more details.\n" - example: file-abc123 - hyperparameters: - type: object - properties: - batch_size: - oneOf: - - enum: - - auto - type: string - - maximum: 256 - minimum: 1 - type: integer - description: "Number of examples in each batch. A larger batch size means that model parameters\nare updated less frequently, but with lower variance.\n" - default: auto - learning_rate_multiplier: - oneOf: - - enum: - - auto - type: string - - minimum: 0 - exclusiveMinimum: true - type: number - description: "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid\noverfitting.\n" - default: auto - n_epochs: - oneOf: - - enum: - - auto - type: string - - maximum: 50 - minimum: 1 - type: integer - description: "The number of epochs to train the model for. An epoch refers to one full cycle\nthrough the training dataset.\n" - default: auto - description: The hyperparameters used for the fine-tuning job. 
- suffix: - maxLength: 64 - minLength: 1 + instructions: type: string - description: "A string of up to 64 characters that will be added to your fine-tuned model name.\n\nFor example, a `suffix` of \"custom-model-name\" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.\n" - default: + description: 'Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis.' nullable: true - validation_file: + additional_instructions: type: string - description: "The ID of an uploaded file that contains validation data.\n\nIf you provide this file, the data is used to generate validation\nmetrics periodically during fine-tuning. These metrics can be viewed in\nthe fine-tuning results file.\nThe same data should not be present in both train and validation files.\n\nYour dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`.\n\nSee the [fine-tuning guide](/docs/guides/fine-tuning) for more details.\n" + description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. nullable: true - example: file-abc123 - integrations: + additional_messages: type: array items: - required: - - type - - wandb - type: object - properties: - type: - oneOf: - - enum: - - wandb - type: string - description: "The type of integration to enable. Currently, only \"wandb\" (Weights and Biases) is supported.\n" - wandb: - required: - - project - type: object - properties: - project: - type: string - description: "The name of the project that the new run will be created under.\n" - example: my-wandb-project - name: - type: string - description: "A display name to set for the run. If not set, we will use the Job ID as the name.\n" - nullable: true - entity: - type: string - description: "The entity to use for the run. 
This allows you to set the team or username of the WandB user that you would\nlike associated with the run. If not set, the default entity for the registered WandB API key is used.\n" - nullable: true - tags: - type: array - items: - type: string - example: custom-tag - description: "A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some\ndefault tags are generated by OpenAI: \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n" - description: "The settings for your integration with Weights and Biases. This payload specifies the project that\nmetrics will be sent to. Optionally, you can set an explicit display name for your run, add tags\nto your run, and set a default entity (team, username, etc) to be associated with your run.\n" - description: A list of integrations to enable for your fine-tuning job. - nullable: true - seed: - maximum: 2147483647 - minimum: 0 - type: integer - description: "The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases.\nIf a seed is not specified, one will be generated for you.\n" + $ref: '#/components/schemas/CreateMessageRequest' + description: Adds additional messages to the thread before creating the run. 
nullable: true - example: 42 - ListFineTuningJobEventsResponse: - required: - - object - - data - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJobEvent' - object: - enum: - - list - type: string - ListFineTuningJobCheckpointsResponse: - required: - - object - - data - - has_more - type: object - properties: - data: + tools: + maxItems: 20 type: array items: - $ref: '#/components/schemas/FineTuningJobCheckpoint' - object: - enum: - - list - type: string - first_id: - type: string + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearch' + - $ref: '#/components/schemas/AssistantToolsFunction' + x-oaiExpandable: true + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. nullable: true - last_id: - type: string + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" nullable: true - has_more: + x-oaiTypeLabel: map + temperature: + maximum: 2 + minimum: 0 + type: number + description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" + default: 1 + nullable: true + example: 1 + top_p: + maximum: 1 + minimum: 0 + type: number + description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" + default: 1 + nullable: true + example: 1 + stream: type: boolean - CreateEmbeddingRequest: + description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" + nullable: true + max_prompt_tokens: + minimum: 256 + type: integer + description: "The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" + nullable: true + max_completion_tokens: + minimum: 256 + type: integer + description: "The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" + nullable: true + truncation_strategy: + $ref: '#/components/schemas/TruncationObject' + tool_choice: + $ref: '#/components/schemas/AssistantsApiToolChoiceOption' + parallel_tool_calls: + $ref: '#/components/schemas/ParallelToolCalls' + response_format: + $ref: '#/components/schemas/AssistantsApiResponseFormatOption' + additionalProperties: false + CreateSpeechRequest: required: - model - input + - voice type: object properties: - input: - oneOf: - - title: string + model: + anyOf: + - type: string + - enum: + - tts-1 + - tts-1-hd type: string - description: The string that will be turned into an embedding. - default: '' - example: This is a test. 
- - title: array - maxItems: 2048 - minItems: 1 - type: array - items: - type: string - default: '' - example: '[''This is a test.'']' - description: The array of strings that will be turned into an embedding. - - title: array - maxItems: 2048 - minItems: 1 - type: array - items: - type: integer - description: The array of integers that will be turned into an embedding. - example: '[1212, 318, 257, 1332, 13]' - - title: array - maxItems: 2048 - minItems: 1 - type: array - items: - minItems: 1 - type: array - items: - type: integer - description: The array of arrays containing integers that will be turned into an embedding. - example: '[[1212, 318, 257, 1332, 13]]' - description: "Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" - example: The quick brown fox jumped over the lazy dog - x-oaiExpandable: true + description: "One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`\n" + x-oaiTypeLabel: string + input: + maxLength: 4096 + type: string + description: The text to generate audio for. The maximum length is 4096 characters. + voice: + enum: + - alloy + - echo + - fable + - onyx + - nova + - shimmer + type: string + description: 'The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options).' + response_format: + enum: + - mp3 + - opus + - aac + - flac + - wav + - pcm + type: string + description: 'The format to audio in. 
Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.' + default: mp3 + speed: + maximum: 4 + minimum: 0.25 + type: number + description: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. + default: 1 + additionalProperties: false + CreateThreadAndRunRequest: + required: + - assistant_id + type: object + properties: + assistant_id: + type: string + description: 'The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.' + thread: + $ref: '#/components/schemas/CreateThreadRequest' model: anyOf: - type: string - enum: - - text-embedding-ada-002 - - text-embedding-3-small - - text-embedding-3-large + - gpt-4o + - gpt-4o-2024-08-06 + - gpt-4o-2024-05-13 + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 + - gpt-4-turbo + - gpt-4-turbo-2024-04-09 + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-0125 + - gpt-3.5-turbo-16k-0613 type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - example: text-embedding-3-small + description: 'The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.' + nullable: true + example: gpt-4o x-oaiTypeLabel: string - encoding_format: - enum: - - float - - base64 + instructions: type: string - description: 'The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).' 
- default: float - example: float - dimensions: - minimum: 1 + description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. + nullable: true + tools: + maxItems: 20 + type: array + items: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearch' + - $ref: '#/components/schemas/AssistantToolsFunction' + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + nullable: true + tool_resources: + type: object + properties: + code_interpreter: + type: object + properties: + file_ids: + maxItems: 20 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" + file_search: + type: object + properties: + vector_store_ids: + maxItems: 1 + type: array + items: + type: string + description: "The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" + description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" + nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + temperature: + maximum: 2 + minimum: 0 + type: number + description: "What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" + default: 1 + nullable: true + example: 1 + top_p: + maximum: 1 + minimum: 0 + type: number + description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" + default: 1 + nullable: true + example: 1 + stream: + type: boolean + description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" + nullable: true + max_prompt_tokens: + minimum: 256 type: integer - description: "The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models.\n" + description: "The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" nullable: true - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 + max_completion_tokens: + minimum: 256 + type: integer + description: "The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. 
See `incomplete_details` for more info.\n" + nullable: true + truncation_strategy: + $ref: '#/components/schemas/TruncationObject' + tool_choice: + $ref: '#/components/schemas/AssistantsApiToolChoiceOption' + parallel_tool_calls: + $ref: '#/components/schemas/ParallelToolCalls' + response_format: + $ref: '#/components/schemas/AssistantsApiResponseFormatOption' additionalProperties: false - CreateEmbeddingResponse: - required: - - object - - model - - data - - usage + CreateThreadRequest: type: object properties: - data: + messages: type: array items: - $ref: '#/components/schemas/Embedding' - description: The list of embeddings generated by the model. - model: - type: string - description: The name of the model used to generate the embedding. - object: - enum: - - list - type: string - description: 'The object type, which is always "list".' - usage: - required: - - prompt_tokens - - total_tokens + $ref: '#/components/schemas/CreateMessageRequest' + description: 'A list of [messages](/docs/api-reference/messages) to start the thread with.' + tool_resources: + type: object + properties: + code_interpreter: + type: object + properties: + file_ids: + maxItems: 20 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" + file_search: + type: object + oneOf: + - required: + - vector_store_ids + - required: + - vector_stores + properties: + vector_store_ids: + maxItems: 1 + type: array + items: + type: string + description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" + vector_stores: + maxItems: 1 + type: array + items: + type: object + properties: + file_ids: + maxItems: 10000 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs to add to the vector store. 
There can be a maximum of 10000 files in a vector store.\n" + chunking_strategy: + type: object + oneOf: + - title: Auto Chunking Strategy + required: + - type + type: object + properties: + type: + enum: + - auto + type: string + description: Always `auto`. + additionalProperties: false + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + - title: Static Chunking Strategy + required: + - type + - static + type: object + properties: + type: + enum: + - static + type: string + description: Always `static`. + static: + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + type: object + properties: + max_chunk_size_tokens: + maximum: 4096 + minimum: 100 + type: integer + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" + additionalProperties: false + additionalProperties: false + description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' + x-oaiExpandable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + x-oaiTypeLabel: map + x-oaiExpandable: true + description: "A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread.\n" + description: "A set of resources that are made available to the assistant's tools in this thread. 
The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" + nullable: true + metadata: type: object - properties: - prompt_tokens: - type: integer - description: The number of tokens used by the prompt. - total_tokens: - type: integer - description: The total number of tokens used by the request. - description: The usage information for the request. + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + additionalProperties: false CreateTranscriptionRequest: required: - file @@ -5852,76 +7134,6 @@ components: name: The transcription object (JSON) group: audio example: "{\n \"text\": \"Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that.\"\n}\n" - TranscriptionSegment: - required: - - id - - seek - - start - - end - - text - - tokens - - temperature - - avg_logprob - - compression_ratio - - no_speech_prob - type: object - properties: - id: - type: integer - description: Unique identifier of the segment. - seek: - type: integer - description: Seek offset of the segment. - start: - type: number - description: Start time of the segment in seconds. - format: float - end: - type: number - description: End time of the segment in seconds. - format: float - text: - type: string - description: Text content of the segment. - tokens: - type: array - items: - type: integer - description: Array of token IDs for the text content. - temperature: - type: number - description: Temperature parameter used for generating the segment. 
- format: float - avg_logprob: - type: number - description: 'Average logprob of the segment. If the value is lower than -1, consider the logprobs failed.' - format: float - compression_ratio: - type: number - description: 'Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed.' - format: float - no_speech_prob: - type: number - description: 'Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent.' - format: float - TranscriptionWord: - required: - - word - - start - - end - type: object - properties: - word: - type: string - description: The text content of the word. - start: - type: number - description: Start time of the word in seconds. - format: float - end: - type: number - description: End time of the word in seconds. - format: float CreateTranscriptionResponseVerboseJson: required: - language @@ -5933,7 +7145,7 @@ components: type: string description: The language of the input audio. duration: - type: number + type: string description: The duration of the input audio. text: type: string @@ -5953,16 +7165,6 @@ components: name: The transcription object (Verbose JSON) group: audio example: "{\n \"task\": \"transcribe\",\n \"language\": \"english\",\n \"duration\": 8.470000267028809,\n \"text\": \"The beach was a popular spot on a hot summer day. 
People were swimming in the ocean, building sandcastles, and playing beach volleyball.\",\n \"segments\": [\n {\n \"id\": 0,\n \"seek\": 0,\n \"start\": 0.0,\n \"end\": 3.319999933242798,\n \"text\": \" The beach was a popular spot on a hot summer day.\",\n \"tokens\": [\n 50364, 440, 7534, 390, 257, 3743, 4008, 322, 257, 2368, 4266, 786, 13, 50530\n ],\n \"temperature\": 0.0,\n \"avg_logprob\": -0.2860786020755768,\n \"compression_ratio\": 1.2363636493682861,\n \"no_speech_prob\": 0.00985979475080967\n },\n ...\n ]\n}\n" - AudioResponseFormat: - enum: - - json - - text - - srt - - verbose_json - - vtt - type: string - description: "The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.\n" - default: json CreateTranslationRequest: required: - file @@ -6011,7 +7213,7 @@ components: type: string description: The language of the output translation (always `english`). duration: - type: number + type: string description: The duration of the input audio. text: type: string @@ -6021,1423 +7223,961 @@ components: items: $ref: '#/components/schemas/TranscriptionSegment' description: Segments of the translated text and their corresponding details. - CreateSpeechRequest: - required: - - model - - input - - voice - type: object - properties: - model: - anyOf: - - type: string - - enum: - - tts-1 - - tts-1-hd - type: string - description: "One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`\n" - x-oaiTypeLabel: string - input: - maxLength: 4096 - type: string - description: The text to generate audio for. The maximum length is 4096 characters. - voice: - enum: - - alloy - - echo - - fable - - onyx - - nova - - shimmer - type: string - description: 'The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options).' 
- response_format: - enum: - - mp3 - - opus - - aac - - flac - - wav - - pcm - type: string - description: 'The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.' - default: mp3 - speed: - maximum: 4.0 - minimum: 0.25 - type: number - description: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. - default: 1 - additionalProperties: false - Model: - title: Model - required: - - id - - object - - created - - owned_by - properties: - id: - type: string - description: 'The model identifier, which can be referenced in the API endpoints.' - created: - type: integer - description: The Unix timestamp (in seconds) when the model was created. - object: - enum: - - model - type: string - description: 'The object type, which is always "model".' - owned_by: - type: string - description: The organization that owns the model. - description: Describes an OpenAI model offering that can be used with the API. - x-oaiMeta: - name: The model object - example: "{\n \"id\": \"VAR_model_id\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"openai\"\n}\n" - OpenAIFile: - title: OpenAIFile + CreateUploadRequest: required: - - id - - object - - bytes - - created_at - filename - purpose - - status + - bytes + - mime_type + type: object properties: - id: - type: string - description: 'The file identifier, which can be referenced in the API endpoints.' - bytes: - type: integer - description: 'The size of the file, in bytes.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the file was created. filename: type: string - description: The name of the file. - object: - enum: - - file - type: string - description: 'The object type, which is always `file`.' 
+ description: "The name of the file to upload.\n" purpose: enum: - assistants - - assistants_output - batch - - batch_output - fine-tune - - fine-tune-results - vision type: string - description: 'The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`.' - status: - enum: - - uploaded - - processed - - error - type: string - description: 'Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`.' - deprecated: true - status_details: + description: "The intended purpose of the uploaded file.\n\nSee the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).\n" + bytes: + type: integer + description: "The number of bytes in the file you are uploading.\n" + mime_type: type: string - description: 'Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`.' - deprecated: true - description: The `File` object represents a document that has been uploaded to OpenAI. - x-oaiMeta: - name: The file object - example: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"salesOverview.pdf\",\n \"purpose\": \"assistants\",\n}\n" - Upload: - title: Upload + description: "The MIME type of the file.\n\nThis must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision.\n" + additionalProperties: false + CreateVectorStoreFileBatchRequest: + required: + - file_ids + type: object + properties: + file_ids: + maxItems: 500 + minItems: 1 + type: array + items: + type: string + description: 'A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files.' 
+ chunking_strategy: + $ref: '#/components/schemas/ChunkingStrategyRequestParam' + additionalProperties: false + CreateVectorStoreFileRequest: required: - - bytes - - created_at - - expires_at - - filename - - id - - purpose - - status - - step_number + - file_id type: object properties: - id: - type: string - description: 'The Upload unique identifier, which can be referenced in API endpoints.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload was created. - filename: - type: string - description: The name of the file to be uploaded. - bytes: - type: integer - description: The intended number of bytes to be uploaded. - purpose: + file_id: type: string - description: 'The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values.' - status: - enum: - - pending - - completed - - cancelled - - expired + description: 'A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files.' + chunking_strategy: + $ref: '#/components/schemas/ChunkingStrategyRequestParam' + additionalProperties: false + CreateVectorStoreRequest: + type: object + properties: + file_ids: + maxItems: 500 + type: array + items: + type: string + description: 'A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files.' + name: type: string - description: The status of the Upload. - expires_at: + description: The name of the vector store. + expires_after: + $ref: '#/components/schemas/VectorStoreExpirationAfter' + chunking_strategy: + type: object + oneOf: + - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' + - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' + description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty.' 
+ x-oaiExpandable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + additionalProperties: false + DefaultProjectErrorResponse: + required: + - code + - message + type: object + properties: + code: type: integer - description: The Unix timestamp (in seconds) for when the Upload was created. - object: - enum: - - upload + message: type: string - description: 'The object type, which is always "upload".' - file: - $ref: '#/components/schemas/OpenAIFile' - description: "The Upload object can accept byte chunks in the form of Parts.\n" - x-oaiMeta: - name: The upload object - example: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"completed\",\n \"expires_at\": 1719127296,\n \"file\": {\n \"id\": \"file-xyz321\",\n \"object\": \"file\",\n \"bytes\": 2147483648,\n \"created_at\": 1719186911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n}\n" - UploadPart: - title: UploadPart + DeleteAssistantResponse: required: - - created_at - id - object - - upload_id + - deleted type: object properties: id: type: string - description: 'The upload Part unique identifier, which can be referenced in API endpoints.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the Part was created. - upload_id: - type: string - description: The ID of the Upload object that this Part was added to. + deleted: + type: boolean object: enum: - - upload.part + - assistant.deleted type: string - description: 'The object type, which is always `upload.part`.' 
- description: "The upload Part represents a chunk of bytes we can add to an Upload object.\n" - x-oaiMeta: - name: The upload part object - example: "{\n \"id\": \"part_def456\",\n \"object\": \"upload.part\",\n \"created_at\": 1719186911,\n \"upload_id\": \"upload_abc123\"\n}\n" - Embedding: + DeleteFileResponse: required: - - index + - id - object - - embedding + - deleted type: object properties: - index: - type: integer - description: The index of the embedding in the list of embeddings. - embedding: - type: array - items: - type: number - description: "The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings).\n" + id: + type: string object: enum: - - embedding + - file type: string - description: 'The object type, which is always "embedding".' - description: "Represents an embedding vector returned by embedding endpoint.\n" - x-oaiMeta: - name: The embedding object - example: "{\n \"object\": \"embedding\",\n \"embedding\": [\n 0.0023064255,\n -0.009327292,\n .... (1536 floats total for ada-002)\n -0.0028842222,\n ],\n \"index\": 0\n}\n" - FineTuningJob: - title: FineTuningJob + deleted: + type: boolean + DeleteMessageResponse: required: - - created_at - - error - - finished_at - - fine_tuned_model - - hyperparameters - id - - model - object - - organization_id - - result_files - - status - - trained_tokens - - training_file - - validation_file - - seed + - deleted type: object properties: id: type: string - description: 'The object identifier, which can be referenced in the API endpoints.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - error: - required: - - code - - message - - param - type: object - properties: - code: - type: string - description: A machine-readable error code. - message: - type: string - description: A human-readable error message. 
- param: - type: string - description: 'The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific.' - nullable: true - description: 'For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure.' - nullable: true - fine_tuned_model: - type: string - description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. - nullable: true - finished_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. - nullable: true - hyperparameters: - required: - - n_epochs - type: object - properties: - n_epochs: - oneOf: - - enum: - - auto - type: string - - maximum: 50 - minimum: 1 - type: integer - description: "The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.\n\"auto\" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs." - default: auto - description: 'The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.' - model: - type: string - description: The base model that is being fine-tuned. + deleted: + type: boolean object: enum: - - fine_tuning.job - type: string - description: 'The object type, which is always "fine_tuning.job".' - organization_id: - type: string - description: The organization that owns the fine-tuning job. - result_files: - type: array - items: - type: string - example: file-abc123 - description: 'The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents).' 
- status: - enum: - - validating_files - - queued - - running - - succeeded - - failed - - cancelled - type: string - description: 'The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.' - trained_tokens: - type: integer - description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. - nullable: true - training_file: - type: string - description: 'The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents).' - validation_file: + - thread.message.deleted type: string - description: 'The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents).' - nullable: true - integrations: - maxItems: 5 - type: array - items: - oneOf: - - $ref: '#/components/schemas/FineTuningIntegration' - x-oaiExpandable: true - description: A list of integrations to enable for this fine-tuning job. - nullable: true - seed: - type: integer - description: The seed used for the fine-tuning job. - estimated_finish: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. 
- nullable: true - description: "The `fine_tuning.job` object represents a fine-tuning job that has been created through the API.\n" - x-oaiMeta: - name: The fine-tuning job object - example: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"davinci-002\",\n \"created_at\": 1692661014,\n \"finished_at\": 1692661190,\n \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n \"organization_id\": \"org-123\",\n \"result_files\": [\n \"file-abc123\"\n ],\n \"status\": \"succeeded\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n },\n \"trained_tokens\": 5768,\n \"integrations\": [],\n \"seed\": 0,\n \"estimated_finish\": 0\n}\n" - FineTuningIntegration: - title: Fine-Tuning Job Integration + DeleteModelResponse: required: - - type - - wandb + - id + - object + - deleted type: object properties: - type: - enum: - - wandb + id: type: string - description: The type of the integration being enabled for the fine-tuning job - wandb: - required: - - project - type: object - properties: - project: - type: string - description: "The name of the project that the new run will be created under.\n" - example: my-wandb-project - name: - type: string - description: "A display name to set for the run. If not set, we will use the Job ID as the name.\n" - nullable: true - entity: - type: string - description: "The entity to use for the run. This allows you to set the team or username of the WandB user that you would\nlike associated with the run. If not set, the default entity for the registered WandB API key is used.\n" - nullable: true - tags: - type: array - items: - type: string - example: custom-tag - description: "A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. 
Some\ndefault tags are generated by OpenAI: \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n" - description: "The settings for your integration with Weights and Biases. This payload specifies the project that\nmetrics will be sent to. Optionally, you can set an explicit display name for your run, add tags\nto your run, and set a default entity (team, username, etc) to be associated with your run.\n" - FineTuningJobEvent: + deleted: + type: boolean + object: + type: string + DeleteThreadResponse: required: - id - object - - created_at - - level - - message + - deleted type: object properties: id: type: string - created_at: - type: integer - level: + deleted: + type: boolean + object: enum: - - info - - warn - - error + - thread.deleted type: string - message: + DeleteVectorStoreFileResponse: + required: + - id + - object + - deleted + type: object + properties: + id: type: string + deleted: + type: boolean object: enum: - - fine_tuning.job.event + - vector_store.file.deleted type: string - description: Fine-tuning job event object - x-oaiMeta: - name: The fine-tuning job event object - example: "{\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ftevent-abc123\"\n \"created_at\": 1677610602,\n \"level\": \"info\",\n \"message\": \"Created fine-tuning job\"\n}\n" - FineTuningJobCheckpoint: - title: FineTuningJobCheckpoint + DeleteVectorStoreResponse: required: - - created_at - - fine_tuning_job_id - - fine_tuned_model_checkpoint - id - - metrics - object - - step_number + - deleted type: object properties: id: type: string - description: 'The checkpoint identifier, which can be referenced in the API endpoints.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the checkpoint was created. - fine_tuned_model_checkpoint: + deleted: + type: boolean + object: + enum: + - vector_store.deleted type: string - description: The name of the fine-tuned checkpoint model that is created. 
- step_number: - type: integer - description: The step number that the checkpoint was created at. - metrics: - type: object - properties: - step: - type: number - train_loss: - type: number - train_mean_token_accuracy: - type: number - valid_loss: - type: number - valid_mean_token_accuracy: - type: number - full_valid_loss: - type: number - full_valid_mean_token_accuracy: - type: number - description: Metrics at the step number during the fine-tuning job. - fine_tuning_job_id: + DoneEvent: + required: + - event + - data + type: object + properties: + event: + enum: + - done type: string - description: The name of the fine-tuning job that this checkpoint was created from. - object: + data: enum: - - fine_tuning.job.checkpoint + - '[DONE]' type: string - description: 'The object type, which is always "fine_tuning.job.checkpoint".' - description: "The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use.\n" + description: Occurs when a stream ends. 
x-oaiMeta: - name: The fine-tuning job checkpoint object - example: "{\n \"object\": \"fine_tuning.job.checkpoint\",\n \"id\": \"ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P\",\n \"created_at\": 1712211699,\n \"fine_tuned_model_checkpoint\": \"ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88\",\n \"fine_tuning_job_id\": \"ftjob-fpbNQ3H1GrMehXRf8cO97xTN\",\n \"metrics\": {\n \"step\": 88,\n \"train_loss\": 0.478,\n \"train_mean_token_accuracy\": 0.924,\n \"valid_loss\": 10.112,\n \"valid_mean_token_accuracy\": 0.145,\n \"full_valid_loss\": 0.567,\n \"full_valid_mean_token_accuracy\": 0.944\n },\n \"step_number\": 88\n}\n" - FinetuneChatRequestInput: + dataDescription: '`data` is `[DONE]`' + Embedding: + required: + - index + - object + - embedding type: object properties: - messages: - minItems: 1 - type: array - items: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' - - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' - - $ref: '#/components/schemas/FineTuneChatCompletionRequestAssistantMessage' - - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' - - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' - x-oaiExpandable: true - tools: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTool' - description: A list of tools the model may generate JSON inputs for. - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - functions: - maxItems: 128 - minItems: 1 + index: + type: integer + description: The index of the embedding in the list of embeddings. + embedding: type: array items: - $ref: '#/components/schemas/ChatCompletionFunctions' - description: A list of functions the model may generate JSON inputs for. - deprecated: true - description: The per-line training example of a fine-tuning input file for chat models + type: number + description: "The embedding vector, which is a list of floats. 
The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings).\n" + object: + enum: + - embedding + type: string + description: 'The object type, which is always "embedding".' + description: "Represents an embedding vector returned by embedding endpoint.\n" x-oaiMeta: - name: Training format for chat models - example: "{\n \"messages\": [\n { \"role\": \"user\", \"content\": \"What is the weather in San Francisco?\" },\n {\n \"role\": \"assistant\",\n \"tool_calls\": [\n {\n \"id\": \"call_id\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\\"location\\\": \\\"San Francisco, USA\\\", \\\"format\\\": \\\"celsius\\\"}\"\n }\n }\n ]\n }\n ],\n \"parallel_tool_calls\": false,\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and country, eg. San Francisco, USA\"\n },\n \"format\": { \"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"] }\n },\n \"required\": [\"location\", \"format\"]\n }\n }\n }\n ]\n}\n" - FinetuneCompletionRequestInput: + name: The embedding object + example: "{\n \"object\": \"embedding\",\n \"embedding\": [\n 0.0023064255,\n -0.009327292,\n .... (1536 floats total for ada-002)\n -0.0028842222,\n ],\n \"index\": 0\n}\n" + Error: + required: + - type + - message + - param + - code type: object properties: - prompt: + code: type: string - description: The input prompt for this training example. - completion: + nullable: true + message: type: string - description: The desired completion for this training example. 
- description: The per-line training example of a fine-tuning input file for completions models + param: + type: string + nullable: true + type: + type: string + ErrorEvent: + required: + - event + - data + type: object + properties: + event: + enum: + - error + type: string + data: + $ref: '#/components/schemas/Error' + description: 'Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout.' x-oaiMeta: - name: Training format for completions models - example: "{\n \"prompt\": \"What is the answer to 2+2\",\n \"completion\": \"4\"\n}\n" - CompletionUsage: + dataDescription: '`data` is an [error](/docs/guides/error-codes/api-errors)' + ErrorResponse: required: - - prompt_tokens - - completion_tokens - - total_tokens + - error type: object properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - total_tokens: - type: integer - description: Total number of tokens used in the request (prompt + completion). - completion_tokens_details: + error: + $ref: '#/components/schemas/Error' + FileSearchRankingOptions: + title: File search tool call ranking options + required: + - score_threshold + type: object + properties: + ranker: + enum: + - auto + - default_2024_08_21 + type: string + description: The ranker to use for the file search. If not specified will use the `auto` ranker. + score_threshold: + maximum: 1 + minimum: 0 + type: number + description: The score threshold for the file search. All values must be a floating point number between 0 and 1. + description: "The ranking options for the file search. 
If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" + FineTuneChatCompletionRequestAssistantMessage: + required: + - role + allOf: + - title: Assistant message type: object properties: - reasoning_tokens: + weight: + enum: + - 0 + - 1 type: integer - description: Tokens generated by the model for reasoning. - description: Breakdown of tokens used in a completion. - description: Usage statistics for the completion request. - RunCompletionUsage: - required: - - prompt_tokens - - completion_tokens - - total_tokens - type: object - properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). - description: 'Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.).' - nullable: true - RunStepCompletionUsage: + description: Controls whether the assistant message is trained against (0 or 1) + - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' + FineTuningIntegration: + title: Fine-Tuning Job Integration required: - - prompt_tokens - - completion_tokens - - total_tokens + - type + - wandb type: object properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run step. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run step. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). - description: Usage statistics related to the run step. 
This value will be `null` while the run step's status is `in_progress`. - nullable: true - AssistantsApiResponseFormatOption: - oneOf: - - enum: - - auto + type: + enum: + - wandb type: string - description: "`auto` is the default value\n" - - $ref: '#/components/schemas/ResponseFormatText' - - $ref: '#/components/schemas/ResponseFormatJsonObject' - - $ref: '#/components/schemas/ResponseFormatJsonSchema' - description: "Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" - x-oaiExpandable: true - AssistantObject: - title: Assistant + description: The type of the integration being enabled for the fine-tuning job + wandb: + required: + - project + type: object + properties: + project: + type: string + description: "The name of the project that the new run will be created under.\n" + example: my-wandb-project + name: + type: string + description: "A display name to set for the run. 
If not set, we will use the Job ID as the name.\n" + nullable: true + entity: + type: string + description: "The entity to use for the run. This allows you to set the team or username of the WandB user that you would\nlike associated with the run. If not set, the default entity for the registered WandB API key is used.\n" + nullable: true + tags: + type: array + items: + type: string + example: custom-tag + description: "A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some\ndefault tags are generated by OpenAI: \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n" + description: "The settings for your integration with Weights and Biases. This payload specifies the project that\nmetrics will be sent to. Optionally, you can set an explicit display name for your run, add tags\nto your run, and set a default entity (team, username, etc) to be associated with your run.\n" + FineTuningJob: + title: FineTuningJob required: - - id - - object - created_at - - name - - description + - error + - finished_at + - fine_tuned_model + - hyperparameters + - id - model - - instructions - - tools - - metadata + - object + - organization_id + - result_files + - status + - trained_tokens + - training_file + - validation_file + - seed type: object properties: id: type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - assistant - type: string - description: 'The object type, which is always `assistant`.' + description: 'The object identifier, which can be referenced in the API endpoints.' created_at: type: integer - description: The Unix timestamp (in seconds) for when the assistant was created. - name: - maxLength: 256 - type: string - description: "The name of the assistant. The maximum length is 256 characters.\n" - nullable: true - description: - maxLength: 512 - type: string - description: "The description of the assistant. 
The maximum length is 512 characters.\n" - nullable: true - model: - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - instructions: - maxLength: 256000 - type: string - description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" - nullable: true - tools: - maxItems: 128 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: "A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" - tool_resources: + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + error: + required: + - code + - message + - param type: object properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. 
For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" - default: 1 - nullable: true - example: 1 - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - description: Represents an `assistant` that can call the model and use tools. - x-oaiMeta: - name: The assistant object - beta: true - example: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698984975,\n \"name\": \"Math Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.\",\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - CreateAssistantRequest: - required: - - model - type: object - properties: - model: - anyOf: - - type: string - - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 + code: type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - example: gpt-4o - x-oaiTypeLabel: string - name: - maxLength: 256 - type: string - description: "The name of the assistant. The maximum length is 256 characters.\n" - nullable: true - description: - maxLength: 512 - type: string - description: "The description of the assistant. The maximum length is 512 characters.\n" + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + param: + type: string + description: 'The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific.' + nullable: true + description: 'For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure.' nullable: true - instructions: - maxLength: 256000 + fine_tuned_model: type: string - description: "The system instructions that the assistant uses. 
The maximum length is 256,000 characters.\n" + description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. nullable: true - tools: - maxItems: 128 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: "A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" - tool_resources: + finished_at: + type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. + nullable: true + hyperparameters: + required: + - n_epochs type: object properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object + n_epochs: oneOf: - - required: - - vector_store_ids - - required: - - vector_stores - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - vector_stores: - maxItems: 1 - type: array - items: - type: object - properties: - file_ids: - maxItems: 10000 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs to add to the vector store. 
There can be a maximum of 10000 files in a vector store.\n" - chunking_strategy: - type: object - oneOf: - - title: Auto Chunking Strategy - required: - - type - type: object - properties: - type: - enum: - - auto - type: string - description: Always `auto`. - additionalProperties: false - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - - title: Static Chunking Strategy - required: - - type - - static - type: object - properties: - type: - enum: - - static - type: string - description: Always `static`. - static: - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - type: object - properties: - max_chunk_size_tokens: - maximum: 4096 - minimum: 100 - type: integer - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" - additionalProperties: false - additionalProperties: false - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' - x-oaiExpandable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - x-oaiTypeLabel: map - description: "A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. 
For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - ModifyAssistantRequest: - type: object - properties: + - enum: + - auto + type: string + - maximum: 50 + minimum: 1 + type: integer + description: "The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.\n\"auto\" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs." + default: auto + description: 'The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.' model: - anyOf: - - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - name: - maxLength: 256 type: string - description: "The name of the assistant. The maximum length is 256 characters.\n" - nullable: true - description: - maxLength: 512 + description: The base model that is being fine-tuned. 
+ object: + enum: + - fine_tuning.job type: string - description: "The description of the assistant. The maximum length is 512 characters.\n" + description: 'The object type, which is always "fine_tuning.job".' + organization_id: + type: string + description: The organization that owns the fine-tuning job. + result_files: + type: array + items: + type: string + example: file-abc123 + description: 'The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents).' + status: + enum: + - validating_files + - queued + - running + - succeeded + - failed + - cancelled + type: string + description: 'The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.' + trained_tokens: + type: integer + description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. nullable: true - instructions: - maxLength: 256000 + training_file: type: string - description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" + description: 'The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents).' + validation_file: + type: string + description: 'The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents).' nullable: true - tools: - maxItems: 128 + integrations: + maxItems: 5 type: array items: oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' + - $ref: '#/components/schemas/FineTuningIntegration' x-oaiExpandable: true - description: "A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. 
Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: empty - default: 1 + description: A list of integrations to enable for this fine-tuning job. nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: empty - default: 1 + seed: + type: integer + description: The seed used for the fine-tuning job. + estimated_finish: + type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. 
nullable: true - example: 1 - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - DeleteAssistantResponse: + description: "The `fine_tuning.job` object represents a fine-tuning job that has been created through the API.\n" + x-oaiMeta: + name: The fine-tuning job object + example: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"davinci-002\",\n \"created_at\": 1692661014,\n \"finished_at\": 1692661190,\n \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n \"organization_id\": \"org-123\",\n \"result_files\": [\n \"file-abc123\"\n ],\n \"status\": \"succeeded\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n },\n \"trained_tokens\": 5768,\n \"integrations\": [],\n \"seed\": 0,\n \"estimated_finish\": 0\n}\n" + FineTuningJobCheckpoint: + title: FineTuningJobCheckpoint required: + - created_at + - fine_tuning_job_id + - fine_tuned_model_checkpoint - id + - metrics - object - - deleted + - step_number type: object properties: id: type: string - deleted: - type: boolean + description: 'The checkpoint identifier, which can be referenced in the API endpoints.' + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the checkpoint was created. + fine_tuned_model_checkpoint: + type: string + description: The name of the fine-tuned checkpoint model that is created. + step_number: + type: integer + description: The step number that the checkpoint was created at. 
+ metrics: + type: object + properties: + step: + type: number + train_loss: + type: number + train_mean_token_accuracy: + type: number + valid_loss: + type: number + valid_mean_token_accuracy: + type: number + full_valid_loss: + type: number + full_valid_mean_token_accuracy: + type: number + description: Metrics at the step number during the fine-tuning job. + fine_tuning_job_id: + type: string + description: The name of the fine-tuning job that this checkpoint was created from. object: enum: - - assistant.deleted + - fine_tuning.job.checkpoint type: string - ListAssistantsResponse: + description: 'The object type, which is always "fine_tuning.job.checkpoint".' + description: "The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use.\n" + x-oaiMeta: + name: The fine-tuning job checkpoint object + example: "{\n \"object\": \"fine_tuning.job.checkpoint\",\n \"id\": \"ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P\",\n \"created_at\": 1712211699,\n \"fine_tuned_model_checkpoint\": \"ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88\",\n \"fine_tuning_job_id\": \"ftjob-fpbNQ3H1GrMehXRf8cO97xTN\",\n \"metrics\": {\n \"step\": 88,\n \"train_loss\": 0.478,\n \"train_mean_token_accuracy\": 0.924,\n \"valid_loss\": 10.112,\n \"valid_mean_token_accuracy\": 0.145,\n \"full_valid_loss\": 0.567,\n \"full_valid_mean_token_accuracy\": 0.944\n },\n \"step_number\": 88\n}\n" + FineTuningJobEvent: required: + - id - object - - data - - first_id - - last_id - - has_more + - created_at + - level + - message type: object properties: + id: + type: string + created_at: + type: integer + level: + enum: + - info + - warn + - error + type: string + message: + type: string object: + enum: + - fine_tuning.job.event type: string - example: list - data: + description: Fine-tuning job event object + x-oaiMeta: + name: The fine-tuning job event object + example: "{\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ftevent-abc123\"\n 
\"created_at\": 1677610602,\n \"level\": \"info\",\n \"message\": \"Created fine-tuning job\"\n}\n" + FinetuneChatRequestInput: + type: object + properties: + messages: + minItems: 1 type: array items: - $ref: '#/components/schemas/AssistantObject' - first_id: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' + - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' + - $ref: '#/components/schemas/FineTuneChatCompletionRequestAssistantMessage' + - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' + - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' + x-oaiExpandable: true + tools: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTool' + description: A list of tools the model may generate JSON inputs for. + parallel_tool_calls: + $ref: '#/components/schemas/ParallelToolCalls' + functions: + maxItems: 128 + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionFunctions' + description: A list of functions the model may generate JSON inputs for. + deprecated: true + description: The per-line training example of a fine-tuning input file for chat models + x-oaiMeta: + name: Training format for chat models + example: "{\n \"messages\": [\n { \"role\": \"user\", \"content\": \"What is the weather in San Francisco?\" },\n {\n \"role\": \"assistant\",\n \"tool_calls\": [\n {\n \"id\": \"call_id\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\\"location\\\": \\\"San Francisco, USA\\\", \\\"format\\\": \\\"celsius\\\"}\"\n }\n }\n ]\n }\n ],\n \"parallel_tool_calls\": false,\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and country, eg. 
San Francisco, USA\"\n },\n \"format\": { \"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"] }\n },\n \"required\": [\"location\", \"format\"]\n }\n }\n }\n ]\n}\n" + FinetuneCompletionRequestInput: + type: object + properties: + prompt: type: string - example: asst_abc123 - last_id: + description: The input prompt for this training example. + completion: type: string - example: asst_abc456 - has_more: - type: boolean - example: false + description: The desired completion for this training example. + description: The per-line training example of a fine-tuning input file for completions models x-oaiMeta: - name: List assistants response object - group: chat - example: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698982736,\n \"name\": \"Coding Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc456\",\n \"object\": \"assistant\",\n \"created_at\": 1698982718,\n \"name\": \"My Assistant\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc789\",\n \"object\": \"assistant\",\n \"created_at\": 1698982643,\n \"name\": null,\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n }\n ],\n \"first_id\": \"asst_abc123\",\n \"last_id\": \"asst_abc789\",\n \"has_more\": false\n}\n" - AssistantToolsCode: - title: Code 
interpreter tool + name: Training format for completions models + example: "{\n \"prompt\": \"What is the answer to 2+2\",\n \"completion\": \"4\"\n}\n" + FunctionObject: required: - - type + - name type: object properties: - type: - enum: - - code_interpreter + description: type: string - description: 'The type of tool being defined: `code_interpreter`' - AssistantToolsFileSearch: - title: FileSearch tool - required: - - type + description: 'A description of what the function does, used by the model to choose when and how to call the function.' + name: + type: string + description: 'The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' + parameters: + $ref: '#/components/schemas/FunctionParameters' + strict: + type: boolean + description: 'Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling).' + default: false + nullable: true + FunctionParameters: + type: object + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." + Image: type: object properties: - type: - enum: - - file_search + b64_json: type: string - description: 'The type of tool being defined: `file_search`' - file_search: - type: object - properties: - max_num_results: - maximum: 50 - minimum: 1 - type: integer - description: "The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. 
This number should be between 1 and 50 inclusive.\n\nNote that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" - ranking_options: - $ref: '#/components/schemas/FileSearchRankingOptions' - description: Overrides for the file search tool. - FileSearchRankingOptions: - title: File search tool call ranking options + description: 'The base64-encoded JSON of the generated image, if `response_format` is `b64_json`.' + url: + type: string + description: 'The URL of the generated image, if `response_format` is `url` (default).' + revised_prompt: + type: string + description: 'The prompt that was used to generate the image, if there was any revision to the prompt.' + description: Represents the url or the content of an image generated by the OpenAI API. + x-oaiMeta: + name: The image object + example: "{\n \"url\": \"...\",\n \"revised_prompt\": \"...\"\n}\n" + ImagesResponse: required: - - score_threshold + - created + - data + properties: + created: + type: integer + data: + type: array + items: + $ref: '#/components/schemas/Image' + Invite: + required: + - object + - id + - email + - role + - status + - invited_at + - expires_at type: object properties: - ranker: + object: enum: - - auto - - default_2024_08_21 + - organization.invite type: string - description: The ranker to use for the file search. If not specified will use the `auto` ranker. - score_threshold: - maximum: 1 - minimum: 0 - type: number - description: The score threshold for the file search. All values must be a floating point number between 0 and 1. - description: "The ranking options for the file search. 
If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search/customizing-file-search-settings) for more information.\n" - AssistantToolsFileSearchTypeOnly: - title: FileSearch tool + description: 'The object type, which is always `organization.invite`' + id: + type: string + description: 'The identifier, which can be referenced in API endpoints' + email: + type: string + description: The email address of the individual to whom the invite was sent + role: + enum: + - owner + - reader + type: string + description: '`owner` or `reader`' + status: + enum: + - accepted + - expired + - pending + type: string + description: '`accepted`,`expired`, or `pending`' + invited_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was sent. + expires_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite expires. + accepted_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was accepted. + description: Represents an individual `invite` to the organization. 
+ x-oaiMeta: + name: The invite object + example: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n}\n" + InviteDeleteResponse: required: - - type + - object + - id + - deleted type: object properties: - type: + object: enum: - - file_search + - organization.invite.deleted type: string - description: 'The type of tool being defined: `file_search`' - AssistantToolsFunction: - title: Function tool + description: 'The object type, which is always `organization.invite.deleted`' + id: + type: string + deleted: + type: boolean + InviteListResponse: required: - - type - - function + - object + - data type: object properties: - type: + object: enum: - - function + - list type: string - description: 'The type of tool being defined: `function`' - function: - $ref: '#/components/schemas/FunctionObject' - TruncationObject: - title: Thread Truncation Controls + description: 'The object type, which is always `list`' + data: + type: array + items: + $ref: '#/components/schemas/Invite' + first_id: + type: string + description: The first `invite_id` in the retrieved `list` + last_id: + type: string + description: The last `invite_id` in the retrieved `list` + has_more: + type: boolean + description: The `has_more` property is used for pagination to indicate there are additional results. + InviteRequest: required: - - type + - email + - role type: object properties: - type: - enum: - - auto - - last_messages + email: type: string - description: 'The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`.' 
- last_messages: - minimum: 1 - type: integer - description: The number of most recent messages from the thread when constructing the context for the run. - nullable: true - description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. - AssistantsApiToolChoiceOption: - oneOf: - - enum: - - none - - auto - - required + description: Send an email to this address + role: + enum: + - reader + - owner type: string - description: "`none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user.\n" - - $ref: '#/components/schemas/AssistantsNamedToolChoice' - description: "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tools and instead generates a message.\n`auto` is the default value and means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools before responding to the user.\nSpecifying a particular tool like `{\"type\": \"file_search\"}` or `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n" - x-oaiExpandable: true - AssistantsNamedToolChoice: + description: '`owner` or `reader`' + ListAssistantsResponse: required: - - type + - object + - data + - first_id + - last_id + - has_more type: object properties: - type: - enum: - - function - - code_interpreter - - file_search + object: type: string - description: 'The type of the tool. If type is `function`, the function name must be set' - function: - required: - - name - type: object - properties: - name: - type: string - description: The name of the function to call. - description: Specifies a tool the model should use. Use to force the model to call a specific tool. 
- RunObject: - title: A run on a thread + example: list + data: + type: array + items: + $ref: '#/components/schemas/AssistantObject' + first_id: + type: string + example: asst_abc123 + last_id: + type: string + example: asst_abc456 + has_more: + type: boolean + example: false + x-oaiMeta: + name: List assistants response object + group: chat + example: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698982736,\n \"name\": \"Coding Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc456\",\n \"object\": \"assistant\",\n \"created_at\": 1698982718,\n \"name\": \"My Assistant\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc789\",\n \"object\": \"assistant\",\n \"created_at\": 1698982643,\n \"name\": null,\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n }\n ],\n \"first_id\": \"asst_abc123\",\n \"last_id\": \"asst_abc789\",\n \"has_more\": false\n}\n" + ListAuditLogsResponse: required: - - id - object - - created_at - - thread_id - - assistant_id - - status - - required_action - - last_error - - expires_at - - started_at - - cancelled_at - - failed_at - - completed_at - - model - - instructions - - tools - - metadata - - usage - - incomplete_details - - max_prompt_tokens - - max_completion_tokens - - 
truncation_strategy - - tool_choice - - parallel_tool_calls - - response_format + - data + - first_id + - last_id + - has_more type: object properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' object: enum: - - thread.run - type: string - description: 'The object type, which is always `thread.run`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was created. - thread_id: + - list type: string - description: 'The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run.' - assistant_id: + data: + type: array + items: + $ref: '#/components/schemas/AuditLog' + first_id: type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run.' - status: - enum: - - queued - - in_progress - - requires_action - - cancelling - - cancelled - - failed - - completed - - incomplete - - expired + example: audit_log-defb456h8dks + last_id: type: string - description: 'The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`.' - required_action: - required: - - type - - submit_tool_outputs - type: object - properties: - type: - enum: - - submit_tool_outputs - type: string - description: 'For now, this is always `submit_tool_outputs`.' - submit_tool_outputs: - required: - - tool_calls - type: object - properties: - tool_calls: - type: array - items: - $ref: '#/components/schemas/RunToolCallObject' - description: A list of the relevant tool calls. - description: Details on the tool outputs needed for this run to continue. - description: Details on the action required to continue the run. Will be `null` if no action is required. 
- nullable: true - last_error: - required: - - code - - message - type: object - properties: - code: - enum: - - server_error - - rate_limit_exceeded - - invalid_prompt - type: string - description: 'One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.' - message: - type: string - description: A human-readable description of the error. - description: The last error associated with this run. Will be `null` if there are no errors. - nullable: true - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the run will expire. - nullable: true - started_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was started. - nullable: true - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was cancelled. - nullable: true - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run failed. - nullable: true - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was completed. - nullable: true - incomplete_details: - type: object - properties: - reason: - enum: - - max_completion_tokens - - max_prompt_tokens - type: string - description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. - description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. - nullable: true - model: + example: audit_log-hnbkd8s93s + has_more: + type: boolean + ListBatchesResponse: + required: + - object + - data + - has_more + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Batch' + first_id: type: string - description: 'The model that the [assistant](/docs/api-reference/assistants) used for this run.' - instructions: + example: batch_abc123 + last_id: type: string - description: 'The instructions that the [assistant](/docs/api-reference/assistants) used for this run.' 
- tools: - maxItems: 20 + example: batch_abc456 + has_more: + type: boolean + object: + enum: + - list + type: string + ListFilesResponse: + required: + - object + - data + type: object + properties: + data: type: array items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: 'The list of tools that the [assistant](/docs/api-reference/assistants) used for this run.' - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - usage: - $ref: '#/components/schemas/RunCompletionUsage' - temperature: - type: number - description: 'The sampling temperature used for this run. If not set, defaults to 1.' - nullable: true - top_p: - type: number - description: 'The nucleus sampling value used for this run. If not set, defaults to 1.' - nullable: true - max_prompt_tokens: - minimum: 256 - type: integer - description: "The maximum number of prompt tokens specified to have been used over the course of the run.\n" - nullable: true - max_completion_tokens: - minimum: 256 - type: integer - description: "The maximum number of completion tokens specified to have been used over the course of the run.\n" - nullable: true - truncation_strategy: - $ref: '#/components/schemas/TruncationObject' - tool_choice: - $ref: '#/components/schemas/AssistantsApiToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - description: 'Represents an execution run on a [thread](/docs/api-reference/threads).' 
- x-oaiMeta: - name: The run object - beta: true - example: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1698107661,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699073476,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699073498,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [{\"type\": \"file_search\"}, {\"type\": \"code_interpreter\"}],\n \"metadata\": {},\n \"incomplete_details\": null,\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - CreateRunRequest: + $ref: '#/components/schemas/OpenAIFile' + object: + enum: + - list + type: string + ListFineTuningJobCheckpointsResponse: required: - - thread_id - - assistant_id + - object + - data + - has_more type: object properties: - assistant_id: + data: + type: array + items: + $ref: '#/components/schemas/FineTuningJobCheckpoint' + object: + enum: + - list type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.' 
- model: - anyOf: - - type: string - - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 - type: string - description: 'The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.' - nullable: true - example: gpt-4o - x-oaiTypeLabel: string - instructions: + first_id: type: string - description: 'Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis.' nullable: true - additional_instructions: + last_id: type: string - description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. nullable: true - additional_messages: + has_more: + type: boolean + ListFineTuningJobEventsResponse: + required: + - object + - data + type: object + properties: + data: type: array items: - $ref: '#/components/schemas/CreateMessageRequest' - description: Adds additional messages to the thread before creating the run. 
- nullable: true - tools: - maxItems: 20 + $ref: '#/components/schemas/FineTuningJobEvent' + object: + enum: + - list + type: string + ListMessagesResponse: + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + example: list + data: type: array items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - stream: + $ref: '#/components/schemas/MessageObject' + first_id: + type: string + example: msg_abc123 + last_id: + type: string + example: msg_abc123 + has_more: + type: boolean + example: false + ListModelsResponse: + required: + - object + - data + type: object + properties: + object: + enum: + - list + type: string + data: + type: array + items: + $ref: '#/components/schemas/Model' + ListPaginatedFineTuningJobsResponse: + required: + - object + - data + - has_more + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/FineTuningJob' + has_more: + type: boolean + object: + enum: + - list + type: string + ListRunStepsResponse: + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + 
type: string + example: list + data: + type: array + items: + $ref: '#/components/schemas/RunStepObject' + first_id: + type: string + example: step_abc123 + last_id: + type: string + example: step_abc456 + has_more: type: boolean - description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" - nullable: true - max_prompt_tokens: - minimum: 256 - type: integer - description: "The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" - nullable: true - max_completion_tokens: - minimum: 256 - type: integer - description: "The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. 
See `incomplete_details` for more info.\n" - nullable: true - truncation_strategy: - $ref: '#/components/schemas/TruncationObject' - tool_choice: - $ref: '#/components/schemas/AssistantsApiToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false + example: false ListRunsResponse: required: - object @@ -7463,419 +8203,460 @@ components: has_more: type: boolean example: false - ModifyRunRequest: + ListThreadsResponse: + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: '#/components/schemas/ThreadObject' + first_id: + type: string + example: asst_abc123 + last_id: + type: string + example: asst_abc456 + has_more: + type: boolean + example: false + ListVectorStoreFilesResponse: + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: '#/components/schemas/VectorStoreFileObject' + first_id: + type: string + example: file-abc123 + last_id: + type: string + example: file-abc456 + has_more: + type: boolean + example: false + ListVectorStoresResponse: + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: '#/components/schemas/VectorStoreObject' + first_id: + type: string + example: vs_abc123 + last_id: + type: string + example: vs_abc456 + has_more: + type: boolean + example: false + MessageContentImageFileObject: + title: Image file + required: + - type + - image_file type: object properties: - metadata: + type: + enum: + - image_file + type: string + description: Always `image_file`. 
+ image_file: + required: + - file_id type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - SubmitToolOutputsRunRequest: + properties: + file_id: + type: string + description: 'The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content.' + detail: + enum: + - auto + - low + - high + type: string + description: 'Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`.' + default: auto + description: 'References an image [File](/docs/api-reference/files) in the content of a message.' + MessageContentImageUrlObject: + title: Image URL + required: + - type + - image_url + type: object + properties: + type: + enum: + - image_url + type: string + description: The type of the content part. + image_url: + required: + - url + type: object + properties: + url: + type: string + description: 'The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp.' + format: uri + detail: + enum: + - auto + - low + - high + type: string + description: 'Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto`' + default: auto + description: References an image URL in the content of a message. + MessageContentRefusalObject: + title: Refusal + required: + - type + - refusal + type: object + properties: + type: + enum: + - refusal + type: string + description: Always `refusal`. + refusal: + type: string + description: The refusal content generated by the assistant. 
+ MessageContentTextAnnotationsFileCitationObject: + title: File citation + required: + - type + - text + - file_citation + - start_index + - end_index + type: object + properties: + type: + enum: + - file_citation + type: string + description: Always `file_citation`. + text: + type: string + description: The text in the message content that needs to be replaced. + file_citation: + required: + - file_id + type: object + properties: + file_id: + type: string + description: The ID of the specific File the citation is from. + start_index: + minimum: 0 + type: integer + end_index: + minimum: 0 + type: integer + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + MessageContentTextAnnotationsFilePathObject: + title: File path + required: + - type + - text + - file_path + - start_index + - end_index + type: object + properties: + type: + enum: + - file_path + type: string + description: Always `file_path`. + text: + type: string + description: The text in the message content that needs to be replaced. + file_path: + required: + - file_id + type: object + properties: + file_id: + type: string + description: The ID of the file that was generated. + start_index: + minimum: 0 + type: integer + end_index: + minimum: 0 + type: integer + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + MessageContentTextObject: + title: Text + required: + - type + - text + type: object + properties: + type: + enum: + - text + type: string + description: Always `text`. + text: + required: + - value + - annotations + type: object + properties: + value: + type: string + description: The data that makes up the text. 
+ annotations: + type: array + items: + oneOf: + - $ref: '#/components/schemas/MessageContentTextAnnotationsFileCitationObject' + - $ref: '#/components/schemas/MessageContentTextAnnotationsFilePathObject' + x-oaiExpandable: true + description: The text content that is part of a message. + MessageDeltaContentImageFileObject: + title: Image file required: - - tool_outputs + - index + - type type: object properties: - tool_outputs: - type: array - items: - type: object - properties: - tool_call_id: - type: string - description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. - output: - type: string - description: The output of the tool call to be submitted to continue the run. - description: A list of tools for which the outputs are being submitted. - stream: - type: boolean - description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" - nullable: true - additionalProperties: false - RunToolCallObject: + index: + type: integer + description: The index of the content part in the message. + type: + enum: + - image_file + type: string + description: Always `image_file`. + image_file: + type: object + properties: + file_id: + type: string + description: 'The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content.' + detail: + enum: + - auto + - low + - high + type: string + description: 'Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`.' + default: auto + description: 'References an image [File](/docs/api-reference/files) in the content of a message.' 
+ MessageDeltaContentImageUrlObject: + title: Image URL required: - - id + - index - type - - function type: object properties: - id: - type: string - description: 'The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint.' + index: + type: integer + description: The index of the content part in the message. type: enum: - - function + - image_url type: string - description: 'The type of tool call the output is required for. For now, this is always `function`.' - function: - required: - - name - - arguments + description: Always `image_url`. + image_url: type: object properties: - name: + url: type: string - description: The name of the function. - arguments: + description: 'The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp.' + detail: + enum: + - auto + - low + - high type: string - description: The arguments that the model expects you to pass to the function. - description: The function definition. - description: Tool call objects - CreateThreadAndRunRequest: + description: 'Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`.' + default: auto + description: References an image URL in the content of a message. + MessageDeltaContentRefusalObject: + title: Refusal required: - - thread_id - - assistant_id + - index + - type type: object properties: - assistant_id: + index: + type: integer + description: The index of the refusal part in the message. + type: + enum: + - refusal type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.' 
- thread: - $ref: '#/components/schemas/CreateThreadRequest' - model: - anyOf: - - type: string - - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 - type: string - description: 'The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.' - nullable: true - example: gpt-4o - x-oaiTypeLabel: string - instructions: + description: Always `refusal`. + refusal: type: string - description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. - nullable: true - tools: - maxItems: 20 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. - nullable: true - tool_resources: + description: The refusal content that is part of a message. + MessageDeltaContentTextAnnotationsFileCitationObject: + title: File citation + required: + - index + - type + type: object + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + enum: + - file_citation + type: string + description: Always `file_citation`. + text: + type: string + description: The text in the message content that needs to be replaced. 
+ file_citation: type: object properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 + file_id: + type: string + description: The ID of the specific File the citation is from. + quote: + type: string + description: The specific quote in the file. 
+ start_index: minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - stream: - type: boolean - description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" - nullable: true - max_prompt_tokens: - minimum: 256 type: integer - description: "The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" - nullable: true - max_completion_tokens: - minimum: 256 + end_index: + minimum: 0 type: integer - description: "The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" - nullable: true - truncation_strategy: - $ref: '#/components/schemas/TruncationObject' - tool_choice: - $ref: '#/components/schemas/AssistantsApiToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - ThreadObject: - title: Thread - required: - - id - - object - - created_at - - tool_resources - - metadata + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. 
+ MessageDeltaContentTextAnnotationsFilePathObject: + title: File path + required: + - index + - type type: object properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: + index: + type: integer + description: The index of the annotation in the text content part. + type: enum: - - thread + - file_path type: string - description: 'The object type, which is always `thread`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the thread was created. - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - description: 'Represents a thread that contains [messages](/docs/api-reference/messages).' 
- x-oaiMeta: - name: The thread object - beta: true - example: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1698107661,\n \"metadata\": {}\n}\n" - CreateThreadRequest: - type: object - properties: - messages: - type: array - items: - $ref: '#/components/schemas/CreateMessageRequest' - description: 'A list of [messages](/docs/api-reference/messages) to start the thread with.' - tool_resources: + description: Always `file_path`. + text: + type: string + description: The text in the message content that needs to be replaced. + file_path: type: object properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - oneOf: - - required: - - vector_store_ids - - required: - - vector_stores - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - vector_stores: - maxItems: 1 - type: array - items: - type: object - properties: - file_ids: - maxItems: 10000 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store.\n" - chunking_strategy: - type: object - oneOf: - - title: Auto Chunking Strategy - required: - - type - type: object - properties: - type: - enum: - - auto - type: string - description: Always `auto`. - additionalProperties: false - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. 
- - title: Static Chunking Strategy - required: - - type - - static - type: object - properties: - type: - enum: - - static - type: string - description: Always `static`. - static: - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - type: object - properties: - max_chunk_size_tokens: - maximum: 4096 - minimum: 100 - type: integer - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" - additionalProperties: false - additionalProperties: false - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' - x-oaiExpandable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - x-oaiTypeLabel: map - x-oaiExpandable: true - description: "A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - ModifyThreadRequest: + file_id: + type: string + description: The ID of the file that was generated. + start_index: + minimum: 0 + type: integer + end_index: + minimum: 0 + type: integer + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + MessageDeltaContentTextObject: + title: Text + required: + - index + - type type: object properties: - tool_resources: + index: + type: integer + description: The index of the content part in the message. + type: + enum: + - text + type: string + description: Always `text`. + text: type: object properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - DeleteThreadResponse: + value: + type: string + description: The data that makes up the text. + annotations: + type: array + items: + oneOf: + - $ref: '#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject' + - $ref: '#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject' + x-oaiExpandable: true + description: The text content that is part of a message. + MessageDeltaObject: + title: Message delta object required: - id - object - - deleted + - delta type: object properties: id: type: string - deleted: - type: boolean + description: 'The identifier of the message, which can be referenced in API endpoints.' object: enum: - - thread.deleted - type: string - ListThreadsResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/ThreadObject' - first_id: - type: string - example: asst_abc123 - last_id: + - thread.message.delta type: string - example: asst_abc456 - has_more: - type: boolean - example: false + description: 'The object type, which is always `thread.message.delta`.' + delta: + type: object + properties: + role: + enum: + - user + - assistant + type: string + description: The entity that produced the message. One of `user` or `assistant`. + content: + type: array + items: + oneOf: + - $ref: '#/components/schemas/MessageDeltaContentImageFileObject' + - $ref: '#/components/schemas/MessageDeltaContentTextObject' + - $ref: '#/components/schemas/MessageDeltaContentRefusalObject' + - $ref: '#/components/schemas/MessageDeltaContentImageUrlObject' + x-oaiExpandable: true + description: The content of the message in array of text and/or images. + description: The delta containing the fields that have changed on the Message. 
+ description: "Represents a message delta i.e. any changed fields on a message during streaming.\n" + x-oaiMeta: + name: The message delta object + beta: true + example: "{\n \"id\": \"msg_123\",\n \"object\": \"thread.message.delta\",\n \"delta\": {\n \"content\": [\n {\n \"index\": 0,\n \"type\": \"text\",\n \"text\": { \"value\": \"Hello\", \"annotations\": [] }\n }\n ]\n }\n}\n" MessageObject: title: The message object required: @@ -7928,2797 +8709,3709 @@ components: description: The reason the message is incomplete. description: 'On an incomplete message, details about why the message is incomplete.' nullable: true - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the message was completed. + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the message was completed. + nullable: true + incomplete_at: + type: integer + description: The Unix timestamp (in seconds) for when the message was marked as incomplete. + nullable: true + role: + enum: + - user + - assistant + type: string + description: The entity that produced the message. One of `user` or `assistant`. + content: + type: array + items: + oneOf: + - $ref: '#/components/schemas/MessageContentImageFileObject' + - $ref: '#/components/schemas/MessageContentImageUrlObject' + - $ref: '#/components/schemas/MessageContentTextObject' + - $ref: '#/components/schemas/MessageContentRefusalObject' + x-oaiExpandable: true + description: The content of the message in array of text and/or images. + assistant_id: + type: string + description: 'If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message.' + nullable: true + run_id: + type: string + description: 'The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints.' 
+ nullable: true + attachments: + type: array + items: + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + type: array + items: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearchTypeOnly' + x-oaiExpandable: true + description: The tools to add this file to. + description: 'A list of files attached to the message, and the tools they were added to.' + nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + description: 'Represents a message within a [thread](/docs/api-reference/threads).' + x-oaiMeta: + name: The message object + beta: true + example: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1698983503,\n \"thread_id\": \"thread_abc123\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"Hi! How can I help you today?\",\n \"annotations\": []\n }\n }\n ],\n \"assistant_id\": \"asst_abc123\",\n \"run_id\": \"run_abc123\",\n \"attachments\": [],\n \"metadata\": {}\n}\n" + MessageRequestContentTextObject: + title: Text + required: + - type + - text + type: object + properties: + type: + enum: + - text + type: string + description: Always `text`. + text: + type: string + description: Text content to be sent to the model + description: The text content that is part of a message. 
+ MessageStreamEvent: + oneOf: + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.message.created + type: string + data: + $ref: '#/components/schemas/MessageObject' + description: 'Occurs when a [message](/docs/api-reference/messages/object) is created.' + x-oaiMeta: + dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.message.in_progress + type: string + data: + $ref: '#/components/schemas/MessageObject' + description: 'Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state.' + x-oaiMeta: + dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.message.delta + type: string + data: + $ref: '#/components/schemas/MessageDeltaObject' + description: 'Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed.' + x-oaiMeta: + dataDescription: '`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.message.completed + type: string + data: + $ref: '#/components/schemas/MessageObject' + description: 'Occurs when a [message](/docs/api-reference/messages/object) is completed.' + x-oaiMeta: + dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.message.incomplete + type: string + data: + $ref: '#/components/schemas/MessageObject' + description: 'Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed.' 
+ x-oaiMeta: + dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' + Model: + title: Model + required: + - id + - object + - created + - owned_by + properties: + id: + type: string + description: 'The model identifier, which can be referenced in the API endpoints.' + created: + type: integer + description: The Unix timestamp (in seconds) when the model was created. + object: + enum: + - model + type: string + description: 'The object type, which is always "model".' + owned_by: + type: string + description: The organization that owns the model. + description: Describes an OpenAI model offering that can be used with the API. + x-oaiMeta: + name: The model object + example: "{\n \"id\": \"VAR_model_id\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"openai\"\n}\n" + ModifyAssistantRequest: + type: object + properties: + model: + anyOf: + - type: string + description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" + name: + maxLength: 256 + type: string + description: "The name of the assistant. The maximum length is 256 characters.\n" + nullable: true + description: + maxLength: 512 + type: string + description: "The description of the assistant. The maximum length is 512 characters.\n" + nullable: true + instructions: + maxLength: 256000 + type: string + description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" + nullable: true + tools: + maxItems: 128 + type: array + items: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearch' + - $ref: '#/components/schemas/AssistantToolsFunction' + x-oaiExpandable: true + description: "A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. 
Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" + tool_resources: + type: object + properties: + code_interpreter: + type: object + properties: + file_ids: + maxItems: 20 + type: array + items: + type: string + description: "Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" + file_search: + type: object + properties: + vector_store_ids: + maxItems: 1 + type: array + items: + type: string + description: "Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" + description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" + nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + temperature: + maximum: 2 + minimum: 0 + type: number + description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" + default: 1 nullable: true - incomplete_at: - type: integer - description: The Unix timestamp (in seconds) for when the message was marked as incomplete. + example: 1 + top_p: + maximum: 1 + minimum: 0 + type: number + description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" + default: 1 nullable: true - role: - enum: - - user - - assistant - type: string - description: The entity that produced the message. One of `user` or `assistant`. - content: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageContentImageFileObject' - - $ref: '#/components/schemas/MessageContentImageUrlObject' - - $ref: '#/components/schemas/MessageContentTextObject' - - $ref: '#/components/schemas/MessageContentRefusalObject' - x-oaiExpandable: true - description: The content of the message in array of text and/or images. - assistant_id: - type: string - description: 'If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message.' + example: 1 + response_format: + $ref: '#/components/schemas/AssistantsApiResponseFormatOption' + additionalProperties: false + ModifyMessageRequest: + type: object + properties: + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" nullable: true - run_id: - type: string - description: 'The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints.' + x-oaiTypeLabel: map + additionalProperties: false + ModifyRunRequest: + type: object + properties: + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" nullable: true - attachments: - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearchTypeOnly' - x-oaiExpandable: true - description: The tools to add this file to. - description: 'A list of files attached to the message, and the tools they were added to.' + x-oaiTypeLabel: map + additionalProperties: false + ModifyThreadRequest: + type: object + properties: + tool_resources: + type: object + properties: + code_interpreter: + type: object + properties: + file_ids: + maxItems: 20 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" + file_search: + type: object + properties: + vector_store_ids: + maxItems: 1 + type: array + items: + type: string + description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" + description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" nullable: true metadata: type: object description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" nullable: true x-oaiTypeLabel: map - description: 'Represents a message within a [thread](/docs/api-reference/threads).' - x-oaiMeta: - name: The message object - beta: true - example: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1698983503,\n \"thread_id\": \"thread_abc123\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"Hi! How can I help you today?\",\n \"annotations\": []\n }\n }\n ],\n \"assistant_id\": \"asst_abc123\",\n \"run_id\": \"run_abc123\",\n \"attachments\": [],\n \"metadata\": {}\n}\n" - MessageDeltaObject: - title: Message delta object + additionalProperties: false + OpenAIFile: + title: OpenAIFile required: - id - object - - delta - type: object + - bytes + - created_at + - filename + - purpose + - status properties: id: type: string - description: 'The identifier of the message, which can be referenced in API endpoints.' + description: 'The file identifier, which can be referenced in the API endpoints.' + bytes: + type: integer + description: 'The size of the file, in bytes.' + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the file was created. + filename: + type: string + description: The name of the file. object: enum: - - thread.message.delta + - file type: string - description: 'The object type, which is always `thread.message.delta`.' - delta: - type: object - properties: - role: - enum: - - user - - assistant - type: string - description: The entity that produced the message. One of `user` or `assistant`. 
- content: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageDeltaContentImageFileObject' - - $ref: '#/components/schemas/MessageDeltaContentTextObject' - - $ref: '#/components/schemas/MessageDeltaContentRefusalObject' - - $ref: '#/components/schemas/MessageDeltaContentImageUrlObject' - x-oaiExpandable: true - description: The content of the message in array of text and/or images. - description: The delta containing the fields that have changed on the Message. - description: "Represents a message delta i.e. any changed fields on a message during streaming.\n" + description: 'The object type, which is always `file`.' + purpose: + enum: + - assistants + - assistants_output + - batch + - batch_output + - fine-tune + - fine-tune-results + - vision + type: string + description: 'The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`.' + status: + enum: + - uploaded + - processed + - error + type: string + description: 'Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`.' + deprecated: true + status_details: + type: string + description: 'Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`.' + deprecated: true + description: The `File` object represents a document that has been uploaded to OpenAI. 
x-oaiMeta: - name: The message delta object - beta: true - example: "{\n \"id\": \"msg_123\",\n \"object\": \"thread.message.delta\",\n \"delta\": {\n \"content\": [\n {\n \"index\": 0,\n \"type\": \"text\",\n \"text\": { \"value\": \"Hello\", \"annotations\": [] }\n }\n ]\n }\n}\n" - CreateMessageRequest: + name: The file object + example: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"salesOverview.pdf\",\n \"purpose\": \"assistants\",\n}\n" + OtherChunkingStrategyResponseParam: + title: Other Chunking Strategy required: - - role - - content + - type type: object properties: - role: + type: enum: - - user - - assistant + - other type: string - description: "The role of the entity that is creating the message. Allowed values include:\n- `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.\n- `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation.\n" - content: - oneOf: - - title: Text content - type: string - description: The text contents of the message. - - title: Array of content parts - minItems: 1 - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageContentImageFileObject' - - $ref: '#/components/schemas/MessageContentImageUrlObject' - - $ref: '#/components/schemas/MessageRequestContentTextObject' - x-oaiExpandable: true - description: 'An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview).' - x-oaiExpandable: true - attachments: - required: - - file_id - - tools - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. 
- tools: - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearchTypeOnly' - x-oaiExpandable: true - description: The tools to add this file to. - description: 'A list of files attached to the message, and the tools they should be added to.' - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map + description: Always `other`. additionalProperties: false - ModifyMessageRequest: + description: 'This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API.' + ParallelToolCalls: + type: boolean + description: 'Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use.' + nullable: true + Project: + required: + - id + - object + - name + - created_at + - status type: object properties: - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + id: + type: string + description: 'The identifier, which can be referenced in API endpoints' + object: + enum: + - organization.project + type: string + description: 'The object type, which is always `organization.project`' + name: + type: string + description: The name of the project. This appears in reporting. + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was created. 
+ archived_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was archived or `null`. nullable: true - x-oaiTypeLabel: map - additionalProperties: false - DeleteMessageResponse: + status: + enum: + - active + - archived + type: string + description: '`active` or `archived`' + description: Represents an individual project. + x-oaiMeta: + name: The project object + example: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" + ProjectApiKey: required: + - object + - redacted_value + - name + - created_at - id + - owner + type: object + properties: + object: + enum: + - organization.project.api_key + type: string + description: 'The object type, which is always `organization.project.api_key`' + redacted_value: + type: string + description: The redacted value of the API key + name: + type: string + description: The name of the API key + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the API key was created + id: + type: string + description: 'The identifier, which can be referenced in API endpoints' + owner: + type: object + properties: + type: + enum: + - user + - service_account + type: string + description: '`user` or `service_account`' + user: + $ref: '#/components/schemas/ProjectUser' + service_account: + $ref: '#/components/schemas/ProjectServiceAccount' + description: Represents an individual API key in a project. 
+ x-oaiMeta: + name: The project API key object + example: "{\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n }\n }\n}\n" + ProjectApiKeyDeleteResponse: + required: - object + - id - deleted type: object properties: + object: + enum: + - organization.project.api_key.deleted + type: string id: type: string deleted: type: boolean + ProjectApiKeyListResponse: + required: + - object + - data + - first_id + - last_id + - has_more + type: object + properties: object: enum: - - thread.message.deleted + - list type: string - ListMessagesResponse: + data: + type: array + items: + $ref: '#/components/schemas/ProjectApiKey' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ProjectCreateRequest: + required: + - name + type: object + properties: + name: + type: string + description: 'The friendly name of the project, this name appears in reports.' + ProjectListResponse: required: - object - data - first_id - last_id - has_more + type: object properties: object: + enum: + - list type: string - example: list data: type: array items: - $ref: '#/components/schemas/MessageObject' + $ref: '#/components/schemas/Project' first_id: type: string - example: msg_abc123 last_id: type: string - example: msg_abc123 has_more: type: boolean - example: false - MessageContentImageFileObject: - title: Image file + ProjectServiceAccount: required: - - type - - image_file + - object + - id + - name + - role + - created_at type: object properties: - type: + object: enum: - - image_file + - organization.project.service_account type: string - description: Always `image_file`. 
- image_file: - required: - - file_id - type: object - properties: - file_id: - type: string - description: 'The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content.' - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`.' - default: auto - description: 'References an image [File](/docs/api-reference/files) in the content of a message.' - MessageDeltaContentImageFileObject: - title: Image file + description: 'The object type, which is always `organization.project.service_account`' + id: + type: string + description: 'The identifier, which can be referenced in API endpoints' + name: + type: string + description: The name of the service account + role: + enum: + - owner + - member + type: string + description: '`owner` or `member`' + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the service account was created + description: Represents an individual service account in a project. + x-oaiMeta: + name: The project service account object + example: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n}\n" + ProjectServiceAccountApiKey: required: - - index - - type + - object + - value + - name + - created_at + - id type: object properties: - index: - type: integer - description: The index of the content part in the message. - type: + object: enum: - - image_file + - organization.project.service_account.api_key type: string - description: Always `image_file`. - image_file: - type: object - properties: - file_id: - type: string - description: 'The [File](/docs/api-reference/files) ID of the image in the message content. 
Set `purpose="vision"` when uploading the File if you need to later display the file content.' - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`.' - default: auto - description: 'References an image [File](/docs/api-reference/files) in the content of a message.' - MessageContentImageUrlObject: - title: Image URL + description: 'The object type, which is always `organization.project.service_account.api_key`' + value: + type: string + name: + type: string + created_at: + type: integer + id: + type: string + ProjectServiceAccountCreateRequest: required: - - type - - image_url + - name + type: object + properties: + name: + type: string + description: The name of the service account being created. + ProjectServiceAccountCreateResponse: + required: + - object + - id + - name + - role + - created_at + - api_key + type: object + properties: + object: + enum: + - organization.project.service_account + type: string + id: + type: string + name: + type: string + role: + enum: + - member + type: string + description: Service accounts can only have one role of type `member` + created_at: + type: integer + api_key: + $ref: '#/components/schemas/ProjectServiceAccountApiKey' + ProjectServiceAccountDeleteResponse: + required: + - object + - id + - deleted type: object properties: - type: + object: enum: - - image_url + - organization.project.service_account.deleted type: string - description: The type of the content part. - image_url: - required: - - url - type: object - properties: - url: - type: string - description: 'The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp.' - format: uri - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. 
Default value is `auto`' - default: auto - description: References an image URL in the content of a message. - MessageDeltaContentImageUrlObject: - title: Image URL + id: + type: string + deleted: + type: boolean + ProjectServiceAccountListResponse: required: - - index - - type + - object + - data + - first_id + - last_id + - has_more type: object properties: - index: - type: integer - description: The index of the content part in the message. - type: + object: enum: - - image_url + - list type: string - description: Always `image_url`. - image_url: - type: object - properties: - url: - type: string - description: 'The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp.' - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`.' - default: auto - description: References an image URL in the content of a message. - MessageContentTextObject: - title: Text + data: + type: array + items: + $ref: '#/components/schemas/ProjectServiceAccount' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ProjectUpdateRequest: required: - - type - - text + - name type: object properties: - type: - enum: - - text + name: type: string - description: Always `text`. - text: - required: - - value - - annotations - type: object - properties: - value: - type: string - description: The data that makes up the text. - annotations: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageContentTextAnnotationsFileCitationObject' - - $ref: '#/components/schemas/MessageContentTextAnnotationsFilePathObject' - x-oaiExpandable: true - description: The text content that is part of a message. - MessageContentRefusalObject: - title: Refusal + description: 'The updated name of the project, this name appears in reports.' 
+ ProjectUser: required: - - type - - refusal + - object + - id + - name + - email + - role + - added_at type: object properties: - type: + object: enum: - - refusal + - organization.project.user type: string - description: Always `refusal`. - refusal: + description: 'The object type, which is always `organization.project.user`' + id: type: string - description: The refusal content generated by the assistant. - MessageRequestContentTextObject: - title: Text + description: 'The identifier, which can be referenced in API endpoints' + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + enum: + - owner + - member + type: string + description: '`owner` or `member`' + added_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was added. + description: Represents an individual user in a project. + x-oaiMeta: + name: The project user object + example: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + ProjectUserCreateRequest: required: - - type - - text + - user_id + - role type: object properties: - type: - enum: - - text + user_id: type: string - description: Always `text`. - text: + description: The ID of the user. + role: + enum: + - owner + - member type: string - description: Text content to be sent to the model - description: The text content that is part of a message. - MessageContentTextAnnotationsFileCitationObject: - title: File citation + description: '`owner` or `member`' + ProjectUserDeleteResponse: required: - - type - - text - - file_citation - - start_index - - end_index + - object + - id + - deleted type: object properties: - type: + object: enum: - - file_citation + - organization.project.user.deleted type: string - description: Always `file_citation`. 
- text: + id: type: string - description: The text in the message content that needs to be replaced. - file_citation: - required: - - file_id - type: object - properties: - file_id: - type: string - description: The ID of the specific File the citation is from. - start_index: - minimum: 0 - type: integer - end_index: - minimum: 0 - type: integer - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. - MessageContentTextAnnotationsFilePathObject: - title: File path + deleted: + type: boolean + ProjectUserListResponse: required: - - type - - text - - file_path - - start_index - - end_index + - object + - data + - first_id + - last_id + - has_more type: object properties: - type: - enum: - - file_path + object: type: string - description: Always `file_path`. - text: + data: + type: array + items: + $ref: '#/components/schemas/ProjectUser' + first_id: type: string - description: The text in the message content that needs to be replaced. - file_path: - required: - - file_id - type: object - properties: - file_id: - type: string - description: The ID of the file that was generated. - start_index: - minimum: 0 - type: integer - end_index: - minimum: 0 - type: integer - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. - MessageDeltaContentTextObject: - title: Text + last_id: + type: string + has_more: + type: boolean + ProjectUserUpdateRequest: + required: + - role + type: object + properties: + role: + enum: + - owner + - member + type: string + description: '`owner` or `member`' + RealtimeClientEventConversationItemCreate: required: - - index - type + - item type: object properties: - index: - type: integer - description: The index of the content part in the message. 
+ event_id: + type: string + description: Optional client-generated ID used to identify this event. type: - enum: - - text type: string - description: Always `text`. - text: + description: 'The event type, must be "conversation.item.create".' + previous_item_id: + type: string + description: The ID of the preceding item after which the new item will be inserted. + item: type: object properties: - value: + id: type: string - description: The data that makes up the text. - annotations: + description: The unique ID of the item. + type: + type: string + description: 'The type of the item ("message", "function_call", "function_call_output").' + status: + type: string + description: 'The status of the item ("completed", "in_progress", "incomplete").' + role: + type: string + description: 'The role of the message sender ("user", "assistant", "system").' + content: type: array items: - oneOf: - - $ref: '#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject' - - $ref: '#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject' - x-oaiExpandable: true - description: The text content that is part of a message. - MessageDeltaContentRefusalObject: - title: Refusal + type: object + properties: + type: + type: string + description: 'The content type ("input_text", "input_audio", "text", "audio").' + text: + type: string + description: The text content. + audio: + type: string + description: Base64-encoded audio bytes. + transcript: + type: string + description: The transcript of the audio. + description: The content of the message. + call_id: + type: string + description: The ID of the function call (for "function_call" items). + name: + type: string + description: The name of the function being called (for "function_call" items). + arguments: + type: string + description: The arguments of the function call (for "function_call" items). + output: + type: string + description: The output of the function call (for "function_call_output" items). 
+ description: The item to add to the conversation. + description: Send this event when adding an item to the conversation. + x-oaiMeta: + name: conversation.item.create + group: realtime + example: "{\n \"event_id\": \"event_345\",\n \"type\": \"conversation.item.create\",\n \"previous_item_id\": null,\n \"item\": {\n \"id\": \"msg_001\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"input_text\",\n \"text\": \"Hello, how are you?\"\n }\n ]\n }\n}\n" + RealtimeClientEventConversationItemDelete: required: - - index - type + - item_id type: object properties: - index: - type: integer - description: The index of the refusal part in the message. + event_id: + type: string + description: Optional client-generated ID used to identify this event. type: - enum: - - refusal type: string - description: Always `refusal`. - refusal: + description: 'The event type, must be "conversation.item.delete".' + item_id: type: string - description: The refusal content that is part of a message. - MessageDeltaContentTextAnnotationsFileCitationObject: - title: File citation + description: The ID of the item to delete. + description: Send this event when you want to remove any item from the conversation history. + x-oaiMeta: + name: conversation.item.delete + group: realtime + example: "{\n \"event_id\": \"event_901\",\n \"type\": \"conversation.item.delete\",\n \"item_id\": \"msg_003\"\n}\n" + RealtimeClientEventConversationItemTruncate: required: - - index - type + - item_id + - content_index + - audio_end_ms type: object properties: - index: - type: integer - description: The index of the annotation in the text content part. + event_id: + type: string + description: Optional client-generated ID used to identify this event. type: - enum: - - file_citation type: string - description: Always `file_citation`. - text: + description: 'The event type, must be "conversation.item.truncate".' 
+ item_id: type: string - description: The text in the message content that needs to be replaced. - file_citation: - type: object - properties: - file_id: - type: string - description: The ID of the specific File the citation is from. - quote: - type: string - description: The specific quote in the file. - start_index: - minimum: 0 + description: The ID of the assistant message item to truncate. + content_index: type: integer - end_index: - minimum: 0 + description: The index of the content part to truncate. + audio_end_ms: type: integer - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. - MessageDeltaContentTextAnnotationsFilePathObject: - title: File path + description: 'Inclusive duration up to which audio is truncated, in milliseconds.' + description: Send this event when you want to truncate a previous assistant message’s audio. + x-oaiMeta: + name: conversation.item.truncate + group: realtime + example: "{\n \"event_id\": \"event_678\",\n \"type\": \"conversation.item.truncate\",\n \"item_id\": \"msg_002\",\n \"content_index\": 0,\n \"audio_end_ms\": 1500\n}\n" + RealtimeClientEventInputAudioBufferAppend: required: - - index - type + - audio type: object properties: - index: - type: integer - description: The index of the annotation in the text content part. + event_id: + type: string + description: Optional client-generated ID used to identify this event. type: - enum: - - file_path type: string - description: Always `file_path`. - text: + description: 'The event type, must be "input_audio_buffer.append".' + audio: type: string - description: The text in the message content that needs to be replaced. - file_path: - type: object - properties: - file_id: - type: string - description: The ID of the file that was generated. 
- start_index: - minimum: 0 - type: integer - end_index: - minimum: 0 - type: integer - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. - RunStepObject: - title: Run steps + description: Base64-encoded audio bytes. + description: Send this event to append audio bytes to the input audio buffer. + x-oaiMeta: + name: input_audio_buffer.append + group: realtime + example: "{\n \"event_id\": \"event_456\",\n \"type\": \"input_audio_buffer.append\",\n \"audio\": \"Base64EncodedAudioData\"\n}\n" + RealtimeClientEventInputAudioBufferClear: required: - - id - - object - - created_at - - assistant_id - - thread_id - - run_id - type - - status - - step_details - - last_error - - cancelled_at - - failed_at - - completed_at - - usage type: object properties: - id: + event_id: type: string - description: 'The identifier of the run step, which can be referenced in API endpoints.' - object: - enum: - - thread.run.step + description: Optional client-generated ID used to identify this event. + type: type: string - description: 'The object type, which is always `thread.run.step`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step was created. - assistant_id: + description: 'The event type, must be "input_audio_buffer.clear".' + description: Send this event to clear the audio bytes in the buffer. + x-oaiMeta: + name: input_audio_buffer.clear + group: realtime + example: "{\n \"event_id\": \"event_012\",\n \"type\": \"input_audio_buffer.clear\"\n}\n" + RealtimeClientEventInputAudioBufferCommit: + required: + - type + type: object + properties: + event_id: type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) associated with the run step.' - thread_id: + description: Optional client-generated ID used to identify this event. + type: type: string - description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' 
- run_id: + description: 'The event type, must be "input_audio_buffer.commit".' + description: Send this event to commit audio bytes to a user message. + x-oaiMeta: + name: input_audio_buffer.commit + group: realtime + example: "{\n \"event_id\": \"event_789\",\n \"type\": \"input_audio_buffer.commit\"\n}\n" + RealtimeClientEventResponseCancel: + required: + - type + type: object + properties: + event_id: type: string - description: 'The ID of the [run](/docs/api-reference/runs) that this run step is a part of.' + description: Optional client-generated ID used to identify this event. type: - enum: - - message_creation - - tool_calls - type: string - description: 'The type of run step, which can be either `message_creation` or `tool_calls`.' - status: - enum: - - in_progress - - cancelled - - failed - - completed - - expired type: string - description: 'The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`.' - step_details: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDetailsMessageCreationObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsObject' - description: The details of the run step. - x-oaiExpandable: true - last_error: - required: - - code - - message - type: object - properties: - code: - enum: - - server_error - - rate_limit_exceeded - type: string - description: One of `server_error` or `rate_limit_exceeded`. - message: - type: string - description: A human-readable description of the error. - description: The last error associated with this run step. Will be `null` if there are no errors. - nullable: true - expired_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. - nullable: true - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step was cancelled. 
- nullable: true - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step failed. - nullable: true - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step completed. - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - usage: - $ref: '#/components/schemas/RunStepCompletionUsage' - description: "Represents a step in execution of a run.\n" + description: 'The event type, must be "response.cancel".' + description: Send this event to cancel an in-progress response. x-oaiMeta: - name: The run step object - beta: true - example: "{\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n}\n" - RunStepDeltaObject: - title: Run step delta object + name: response.cancel + group: realtime + example: "{\n \"event_id\": \"event_567\",\n \"type\": \"response.cancel\"\n}\n" + RealtimeClientEventResponseCreate: required: - - id - - object - - delta + - type + - response type: object properties: - id: + event_id: type: string - description: 'The identifier of the run step, which can be referenced in API endpoints.' 
- object: - enum: - - thread.run.step.delta + description: Optional client-generated ID used to identify this event. + type: type: string - description: 'The object type, which is always `thread.run.step.delta`.' - delta: + description: 'The event type, must be "response.create".' + response: type: object properties: - step_details: - type: object + modalities: + type: array + items: + type: string + description: The modalities for the response. + instructions: + type: string + description: Instructions for the model. + voice: + type: string + description: 'The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`.' + output_audio_format: + type: string + description: The format of output audio. + tools: + type: array + items: + type: object + properties: + type: + type: string + description: The type of the tool. + name: + type: string + description: The name of the function. + description: + type: string + description: The description of the function. + parameters: + type: object + description: Parameters of the function in JSON Schema. + description: Tools (functions) available to the model. + tool_choice: + type: string + description: How the model chooses tools. + temperature: + type: number + description: Sampling temperature. + max_output_tokens: oneOf: - - $ref: '#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsObject' - description: The details of the run step. - x-oaiExpandable: true - description: The delta containing the fields that have changed on the run step. - description: "Represents a run step delta i.e. any changed fields on a run step during streaming.\n" + - type: integer + - enum: + - inf + type: string + description: 'Maximum number of output tokens for a single assistant response, inclusive of tool calls. Provide an integer between 1 and 4096 to limit output tokens, or "inf" for the maximum available tokens for a given model. 
Defaults to "inf".' + description: Configuration for the response. + description: Send this event to trigger a response generation. x-oaiMeta: - name: The run step delta object - beta: true - example: "{\n \"id\": \"step_123\",\n \"object\": \"thread.run.step.delta\",\n \"delta\": {\n \"step_details\": {\n \"type\": \"tool_calls\",\n \"tool_calls\": [\n {\n \"index\": 0,\n \"id\": \"call_123\",\n \"type\": \"code_interpreter\",\n \"code_interpreter\": { \"input\": \"\", \"outputs\": [] }\n }\n ]\n }\n }\n}\n" - ListRunStepsResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/RunStepObject' - first_id: - type: string - example: step_abc123 - last_id: - type: string - example: step_abc456 - has_more: - type: boolean - example: false - RunStepDetailsMessageCreationObject: - title: Message creation + name: response.create + group: realtime + example: "{\n \"event_id\": \"event_234\",\n \"type\": \"response.create\",\n \"response\": {\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"Please assist the user.\",\n \"voice\": \"alloy\",\n \"output_audio_format\": \"pcm16\",\n \"tools\": [\n {\n \"type\": \"function\",\n \"name\": \"calculate_sum\",\n \"description\": \"Calculates the sum of two numbers.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"a\": { \"type\": \"number\" },\n \"b\": { \"type\": \"number\" }\n },\n \"required\": [\"a\", \"b\"]\n }\n }\n ],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.7,\n \"max_output_tokens\": 150\n }\n}\n" + RealtimeClientEventSessionUpdate: required: - type - - message_creation + - session type: object properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. type: - enum: - - message_creation type: string - description: Always `message_creation`. 
- message_creation: - required: - - message_id + description: 'The event type, must be "session.update".' + session: type: object properties: - message_id: + modalities: + type: array + items: + type: string + description: 'The set of modalities the model can respond with. To disable audio, set this to ["text"].' + instructions: type: string - description: The ID of the message that was created by this run step. - description: Details of the message creation by the run step. - RunStepDeltaStepDetailsMessageCreationObject: - title: Message creation + description: The default system instructions prepended to model calls. + voice: + type: string + description: 'The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`. Cannot be changed once the model has responded with audio at least once.' + input_audio_format: + type: string + description: 'The format of input audio. Options are "pcm16", "g711_ulaw", or "g711_alaw".' + output_audio_format: + type: string + description: 'The format of output audio. Options are "pcm16", "g711_ulaw", or "g711_alaw".' + input_audio_transcription: + type: object + properties: + model: + type: string + description: 'The model to use for transcription (e.g., "whisper-1").' + description: Configuration for input audio transcription. Can be set to `null` to turn off. + turn_detection: + type: object + properties: + type: + type: string + description: 'Type of turn detection, only "server_vad" is currently supported.' + threshold: + type: number + description: Activation threshold for VAD (0.0 to 1.0). + prefix_padding_ms: + type: integer + description: Amount of audio to include before speech starts (in milliseconds). + silence_duration_ms: + type: integer + description: Duration of silence to detect speech stop (in milliseconds). + description: Configuration for turn detection. Can be set to `null` to turn off. 
+ tools: + type: array + items: + type: object + properties: + type: + type: string + description: 'The type of the tool, e.g., "function".' + name: + type: string + description: The name of the function. + description: + type: string + description: The description of the function. + parameters: + type: object + description: Parameters of the function in JSON Schema. + description: Tools (functions) available to the model. + tool_choice: + type: string + description: 'How the model chooses tools. Options are "auto", "none", "required", or specify a function.' + temperature: + type: number + description: Sampling temperature for the model. + max_output_tokens: + oneOf: + - type: integer + - enum: + - inf + type: string + description: 'Maximum number of output tokens for a single assistant response, inclusive of tool calls. Provide an integer between 1 and 4096 to limit output tokens, or "inf" for the maximum available tokens for a given model. Defaults to "inf".' + description: Session configuration to update. + description: Send this event to update the session’s default configuration. + x-oaiMeta: + name: session.update + group: realtime + example: "{\n \"event_id\": \"event_123\",\n \"type\": \"session.update\",\n \"session\": {\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"Your knowledge cutoff is 2023-10. 
You are a helpful assistant.\",\n \"voice\": \"alloy\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": {\n \"type\": \"server_vad\",\n \"threshold\": 0.5,\n \"prefix_padding_ms\": 300,\n \"silence_duration_ms\": 200\n },\n \"tools\": [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather for a location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": { \"type\": \"string\" }\n },\n \"required\": [\"location\"]\n }\n }\n ],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.8,\n \"max_output_tokens\": null\n }\n}\n" + RealtimeServerEventConversationCreated: required: + - event_id - type + - conversation type: object properties: + event_id: + type: string + description: The unique ID of the server event. type: - enum: - - message_creation type: string - description: Always `message_creation`. - message_creation: + description: 'The event type, must be "conversation.created".' + conversation: type: object properties: - message_id: + id: type: string - description: The ID of the message that was created by this run step. - description: Details of the message creation by the run step. - RunStepDetailsToolCallsObject: - title: Tool calls + description: The unique ID of the conversation. + object: + type: string + description: 'The object type, must be "realtime.conversation".' + description: The conversation resource. + description: Returned when a conversation is created. Emitted right after session creation. 
+ x-oaiMeta: + name: conversation.created + group: realtime + example: "{\n \"event_id\": \"event_9101\",\n \"type\": \"conversation.created\",\n \"conversation\": {\n \"id\": \"conv_001\",\n \"object\": \"realtime.conversation\"\n }\n}\n" + RealtimeServerEventConversationItemCreated: required: + - event_id - type - - tool_calls + - previous_item_id + - item type: object properties: + event_id: + type: string + description: The unique ID of the server event. type: - enum: - - tool_calls type: string - description: Always `tool_calls`. - tool_calls: - type: array - items: - oneOf: - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsFileSearchObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsFunctionObject' - x-oaiExpandable: true - description: "An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`.\n" - description: Details of the tool call. - RunStepDeltaStepDetailsToolCallsObject: - title: Tool calls + description: 'The event type, must be "conversation.item.created".' + previous_item_id: + type: string + description: The ID of the preceding item. + item: + type: object + properties: + id: + type: string + description: The unique ID of the item. + object: + type: string + description: 'The object type, must be "realtime.item".' + type: + type: string + description: 'The type of the item ("message", "function_call", "function_call_output").' + status: + type: string + description: 'The status of the item ("completed", "in_progress", "incomplete").' + role: + type: string + description: 'The role associated with the item ("user", "assistant", "system").' + content: + type: array + items: + type: object + properties: + type: + type: string + description: 'The content type ("text", "audio", "input_text", "input_audio").' + text: + type: string + description: The text content. 
+ audio: + type: string + description: Base64-encoded audio data. + transcript: + type: string + description: The transcript of the audio. + description: The content of the item. + call_id: + type: string + description: The ID of the function call (for "function_call" items). + name: + type: string + description: The name of the function being called. + arguments: + type: string + description: The arguments of the function call. + output: + type: string + description: The output of the function call (for "function_call_output" items). + description: The item that was created. + description: Returned when a conversation item is created. + x-oaiMeta: + name: conversation.item.created + group: realtime + example: "{\n \"event_id\": \"event_1920\",\n \"type\": \"conversation.item.created\",\n \"previous_item_id\": \"msg_002\",\n \"item\": {\n \"id\": \"msg_003\",\n \"object\": \"realtime.item\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"input_audio\",\n \"transcript\": null\n }\n ]\n }\n}\n" + RealtimeServerEventConversationItemDeleted: required: + - event_id - type + - item_id type: object properties: + event_id: + type: string + description: The unique ID of the server event. type: - enum: - - tool_calls type: string - description: Always `tool_calls`. - tool_calls: - type: array - items: - oneOf: - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject' - x-oaiExpandable: true - description: "An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`.\n" - description: Details of the tool call. 
- RunStepDetailsToolCallsCodeObject: - title: Code Interpreter tool call + description: 'The event type, must be "conversation.item.deleted".' + item_id: + type: string + description: The ID of the item that was deleted. + description: Returned when an item in the conversation is deleted. + x-oaiMeta: + name: conversation.item.deleted + group: realtime + example: "{\n \"event_id\": \"event_2728\",\n \"type\": \"conversation.item.deleted\",\n \"item_id\": \"msg_005\"\n}\n" + RealtimeServerEventConversationItemInputAudioTranscriptionCompleted: required: - - id + - event_id - type - - code_interpreter + - item_id + - content_index + - transcript type: object properties: - id: + event_id: type: string - description: The ID of the tool call. + description: The unique ID of the server event. type: - enum: - - code_interpreter type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - code_interpreter: - required: - - input - - outputs - type: object - properties: - input: - type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - items: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject' - x-oaiExpandable: true - description: 'The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type.' - description: The Code Interpreter tool call definition. - description: Details of the Code Interpreter tool call the run step was involved in. - RunStepDeltaStepDetailsToolCallsCodeObject: - title: Code interpreter tool call + description: 'The event type, must be "conversation.item.input_audio_transcription.completed".' + item_id: + type: string + description: The ID of the user message item. 
+ content_index: + type: integer + description: The index of the content part containing the audio. + transcript: + type: string + description: The transcribed text. + description: Returned when input audio transcription is enabled and a transcription succeeds. + x-oaiMeta: + name: conversation.item.input_audio_transcription.completed + group: realtime + example: "{\n \"event_id\": \"event_2122\",\n \"type\": \"conversation.item.input_audio_transcription.completed\",\n \"item_id\": \"msg_003\",\n \"content_index\": 0,\n \"transcript\": \"Hello, how are you?\"\n}\n" + RealtimeServerEventConversationItemInputAudioTranscriptionFailed: required: - - index + - event_id - type + - item_id + - content_index + - error type: object properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: + event_id: type: string - description: The ID of the tool call. + description: The unique ID of the server event. type: - enum: - - code_interpreter type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - code_interpreter: + description: 'The event type, must be "conversation.item.input_audio_transcription.failed".' + item_id: + type: string + description: The ID of the user message item. + content_index: + type: integer + description: The index of the content part containing the audio. + error: type: object properties: - input: + type: type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - items: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' - x-oaiExpandable: true - description: 'The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). 
Each of these are represented by a different object type.' - description: The Code Interpreter tool call definition. - description: Details of the Code Interpreter tool call the run step was involved in. - RunStepDetailsToolCallsCodeOutputLogsObject: - title: Code Interpreter log output + description: The type of error. + code: + type: string + description: 'Error code, if any.' + message: + type: string + description: A human-readable error message. + param: + type: string + description: 'Parameter related to the error, if any.' + description: Details of the transcription error. + description: 'Returned when input audio transcription is configured, and a transcription request for a user message failed.' + x-oaiMeta: + name: conversation.item.input_audio_transcription.failed + group: realtime + example: "{\n \"event_id\": \"event_2324\",\n \"type\": \"conversation.item.input_audio_transcription.failed\",\n \"item_id\": \"msg_003\",\n \"content_index\": 0,\n \"error\": {\n \"type\": \"transcription_error\",\n \"code\": \"audio_unintelligible\",\n \"message\": \"The audio could not be transcribed.\",\n \"param\": null\n }\n}\n" + RealtimeServerEventConversationItemTruncated: required: + - event_id - type - - logs + - item_id + - content_index + - audio_end_ms type: object properties: + event_id: + type: string + description: The unique ID of the server event. type: - enum: - - logs type: string - description: Always `logs`. - logs: + description: 'The event type, must be "conversation.item.truncated".' + item_id: type: string - description: The text output from the Code Interpreter tool call. - description: Text output from the Code Interpreter tool call as part of a run step. - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: - title: Code interpreter log output + description: The ID of the assistant message item that was truncated. + content_index: + type: integer + description: The index of the content part that was truncated. 
+ audio_end_ms: + type: integer + description: 'The duration up to which the audio was truncated, in milliseconds.' + description: Returned when an earlier assistant audio message item is truncated by the client. + x-oaiMeta: + name: conversation.item.truncated + group: realtime + example: "{\n \"event_id\": \"event_2526\",\n \"type\": \"conversation.item.truncated\",\n \"item_id\": \"msg_004\",\n \"content_index\": 0,\n \"audio_end_ms\": 1500\n}\n" + RealtimeServerEventError: required: - - index + - event_id - type + - error type: object properties: - index: - type: integer - description: The index of the output in the outputs array. - type: - enum: - - logs + event_id: type: string - description: Always `logs`. - logs: + description: The unique ID of the server event. + type: type: string - description: The text output from the Code Interpreter tool call. - description: Text output from the Code Interpreter tool call as part of a run step. - RunStepDetailsToolCallsCodeOutputImageObject: - title: Code Interpreter image output + description: 'The event type, must be "error".' + error: + type: object + properties: + type: + type: string + description: 'The type of error (e.g., "invalid_request_error", "server_error").' + code: + type: string + description: 'Error code, if any.' + message: + type: string + description: A human-readable error message. + param: + type: string + description: 'Parameter related to the error, if any.' + event_id: + type: string + description: 'The event_id of the client event that caused the error, if applicable.' + description: Details of the error. + description: Returned when an error occurs. 
+ x-oaiMeta: + name: error + group: realtime + example: "{\n \"event_id\": \"event_890\",\n \"type\": \"error\",\n \"error\": {\n \"type\": \"invalid_request_error\",\n \"code\": \"invalid_event\",\n \"message\": \"The 'type' field is missing.\",\n \"param\": null,\n \"event_id\": \"event_567\"\n }\n}\n" + RealtimeServerEventInputAudioBufferCleared: required: + - event_id - type - - image type: object properties: + event_id: + type: string + description: The unique ID of the server event. type: - enum: - - image type: string - description: Always `image`. - image: - required: - - file_id - type: object - properties: - file_id: - type: string - description: 'The [file](/docs/api-reference/files) ID of the image.' - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: - title: Code interpreter image output + description: 'The event type, must be "input_audio_buffer.cleared".' + description: Returned when the input audio buffer is cleared by the client. + x-oaiMeta: + name: input_audio_buffer.cleared + group: realtime + example: "{\n \"event_id\": \"event_1314\",\n \"type\": \"input_audio_buffer.cleared\"\n}\n" + RealtimeServerEventInputAudioBufferCommitted: required: - - index + - event_id - type + - previous_item_id + - item_id type: object properties: - index: - type: integer - description: The index of the output in the outputs array. + event_id: + type: string + description: The unique ID of the server event. type: - enum: - - image type: string - description: Always `image`. - image: - type: object - properties: - file_id: - type: string - description: 'The [file](/docs/api-reference/files) ID of the image.' - RunStepDetailsToolCallsFileSearchObject: - title: File search tool call + description: 'The event type, must be "input_audio_buffer.committed".' + previous_item_id: + type: string + description: The ID of the preceding item after which the new item will be inserted. 
+ item_id: + type: string + description: The ID of the user message item that will be created. + description: 'Returned when an input audio buffer is committed, either by the client or automatically in server VAD mode.' + x-oaiMeta: + name: input_audio_buffer.committed + group: realtime + example: "{\n \"event_id\": \"event_1121\",\n \"type\": \"input_audio_buffer.committed\",\n \"previous_item_id\": \"msg_001\",\n \"item_id\": \"msg_002\"\n}\n" + RealtimeServerEventInputAudioBufferSpeechStarted: required: - - id + - event_id - type - - file_search + - audio_start_ms + - item_id type: object properties: - id: + event_id: type: string - description: The ID of the tool call object. + description: The unique ID of the server event. type: - enum: - - file_search type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - file_search: - type: object - properties: - ranking_options: - $ref: '#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject' - results: - type: array - items: - $ref: '#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject' - description: The results of the file search. - description: 'For now, this is always going to be an empty object.' - x-oaiTypeLabel: map - RunStepDetailsToolCallsFileSearchRankingOptionsObject: - title: File search tool call ranking options + description: 'The event type, must be "input_audio_buffer.speech_started".' + audio_start_ms: + type: integer + description: Milliseconds since the session started when speech was detected. + item_id: + type: string + description: The ID of the user message item that will be created when speech stops. + description: Returned in server turn detection mode when speech is detected. 
+ x-oaiMeta: + name: input_audio_buffer.speech_started + group: realtime + example: "{\n \"event_id\": \"event_1516\",\n \"type\": \"input_audio_buffer.speech_started\",\n \"audio_start_ms\": 1000,\n \"item_id\": \"msg_003\"\n}\n" + RealtimeServerEventInputAudioBufferSpeechStopped: required: - - ranker - - score_threshold + - event_id + - type + - audio_end_ms + - item_id type: object properties: - ranker: - enum: - - default_2024_08_21 + event_id: type: string - description: The ranker used for the file search. - score_threshold: - maximum: 1 - minimum: 0 - type: number - description: The score threshold for the file search. All values must be a floating point number between 0 and 1. - description: The ranking options for the file search. - RunStepDetailsToolCallsFileSearchResultObject: - title: File search tool call result + description: The unique ID of the server event. + type: + type: string + description: 'The event type, must be "input_audio_buffer.speech_stopped".' + audio_end_ms: + type: integer + description: Milliseconds since the session started when speech stopped. + item_id: + type: string + description: The ID of the user message item that will be created. + description: Returned in server turn detection mode when speech stops. + x-oaiMeta: + name: input_audio_buffer.speech_stopped + group: realtime + example: "{\n \"event_id\": \"event_1718\",\n \"type\": \"input_audio_buffer.speech_stopped\",\n \"audio_end_ms\": 2000,\n \"item_id\": \"msg_003\"\n}\n" + RealtimeServerEventRateLimitsUpdated: required: - - file_id - - file_name - - score + - event_id + - type + - rate_limits type: object properties: - file_id: + event_id: type: string - description: The ID of the file that result was found in. - file_name: + description: The unique ID of the server event. + type: type: string - description: The name of the file that result was found in. - score: - maximum: 1 - minimum: 0 - type: number - description: The score of the result. 
All values must be a floating point number between 0 and 1. - content: + description: 'The event type, must be "rate_limits.updated".' + rate_limits: type: array items: type: object properties: - type: - enum: - - text - type: string - description: The type of the content. - text: + name: type: string - description: The text content of the file. - description: The content of the result that was found. The content is only included if requested via the include query parameter. - description: A result instance of the file search. - x-oaiTypeLabel: map - RunStepDeltaStepDetailsToolCallsFileSearchObject: - title: File search tool call + description: 'The name of the rate limit ("requests", "tokens", "input_tokens", "output_tokens").' + limit: + type: integer + description: The maximum allowed value for the rate limit. + remaining: + type: integer + description: The remaining value before the limit is reached. + reset_seconds: + type: number + description: Seconds until the rate limit resets. + description: List of rate limit information. + description: Emitted after every "response.done" event to indicate the updated rate limits. + x-oaiMeta: + name: rate_limits.updated + group: realtime + example: "{\n \"event_id\": \"event_5758\",\n \"type\": \"rate_limits.updated\",\n \"rate_limits\": [\n {\n \"name\": \"requests\",\n \"limit\": 1000,\n \"remaining\": 999,\n \"reset_seconds\": 60\n },\n {\n \"name\": \"tokens\",\n \"limit\": 50000,\n \"remaining\": 49950,\n \"reset_seconds\": 60\n }\n ]\n}\n" + RealtimeServerEventResponseAudioDelta: required: - - index + - event_id - type - - file_search + - response_id + - item_id + - output_index + - content_index + - delta type: object properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: + event_id: type: string - description: The ID of the tool call object. + description: The unique ID of the server event. 
type: - enum: - - file_search type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - file_search: - type: object - description: 'For now, this is always going to be an empty object.' - x-oaiTypeLabel: map - RunStepDetailsToolCallsFunctionObject: - title: Function tool call + description: 'The event type, must be "response.audio.delta".' + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + delta: + type: string + description: Base64-encoded audio data delta. + description: Returned when the model-generated audio is updated. + x-oaiMeta: + name: response.audio.delta + group: realtime + example: "{\n \"event_id\": \"event_4950\",\n \"type\": \"response.audio.delta\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_008\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"delta\": \"Base64EncodedAudioDelta\"\n}\n" + RealtimeServerEventResponseAudioDone: required: - - id + - event_id - type - - function + - response_id + - item_id + - output_index + - content_index type: object properties: - id: + event_id: type: string - description: The ID of the tool call object. + description: The unique ID of the server event. type: - enum: - - function type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - function: - required: - - name - - arguments - - output - type: object - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments passed to the function. - output: - type: string - description: 'The output of the function. 
This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet.' - nullable: true - description: The definition of the function that was called. - RunStepDeltaStepDetailsToolCallsFunctionObject: - title: Function tool call + description: 'The event type, must be "response.audio.done".' + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + description: 'Returned when the model-generated audio is done. Also emitted when a Response is interrupted, incomplete, or cancelled.' + x-oaiMeta: + name: response.audio.done + group: realtime + example: "{\n \"event_id\": \"event_5152\",\n \"type\": \"response.audio.done\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_008\",\n \"output_index\": 0,\n \"content_index\": 0\n}\n" + RealtimeServerEventResponseAudioTranscriptDelta: required: - - index + - event_id - type + - response_id + - item_id + - output_index + - content_index + - delta type: object properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: + event_id: type: string - description: The ID of the tool call object. + description: The unique ID of the server event. type: - enum: - - function type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - function: - type: object - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments passed to the function. - output: - type: string - description: 'The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet.' 
- nullable: true - description: The definition of the function that was called. - VectorStoreExpirationAfter: - title: Vector store expiration policy - required: - - anchor - - days - type: object - properties: - anchor: - enum: - - last_active_at + description: 'The event type, must be "response.audio_transcript.delta".' + response_id: type: string - description: 'Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`.' - days: - maximum: 365 - minimum: 1 + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: type: integer - description: The number of days after the anchor time that the vector store will expire. - description: The expiration policy for a vector store. - VectorStoreObject: - title: Vector store + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + delta: + type: string + description: The transcript delta. + description: Returned when the model-generated transcription of audio output is updated. + x-oaiMeta: + name: response.audio_transcript.delta + group: realtime + example: "{\n \"event_id\": \"event_4546\",\n \"type\": \"response.audio_transcript.delta\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_008\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"delta\": \"Hello, how can I a\"\n}\n" + RealtimeServerEventResponseAudioTranscriptDone: required: - - id - - object - - usage_bytes - - created_at - - status - - last_active_at - - name - - file_counts - - metadata + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - transcript type: object properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - vector_store - type: string - description: 'The object type, which is always `vector_store`.' 
- created_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store was created. - name: + event_id: type: string - description: The name of the vector store. - usage_bytes: - type: integer - description: The total number of bytes used by the files in the vector store. - file_counts: - required: - - in_progress - - completed - - failed - - cancelled - - total - type: object - properties: - in_progress: - type: integer - description: The number of files that are currently being processed. - completed: - type: integer - description: The number of files that have been successfully processed. - failed: - type: integer - description: The number of files that have failed to process. - cancelled: - type: integer - description: The number of files that were cancelled. - total: - type: integer - description: The total number of files. - status: - enum: - - expired - - in_progress - - completed + description: The unique ID of the server event. + type: type: string - description: 'The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use.' - expires_after: - $ref: '#/components/schemas/VectorStoreExpirationAfter' - expires_at: + description: 'The event type, must be "response.audio_transcript.done".' + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: type: integer - description: The Unix timestamp (in seconds) for when the vector store will expire. - nullable: true - last_active_at: + description: The index of the output item in the response. + content_index: type: integer - description: The Unix timestamp (in seconds) for when the vector store was last active. - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - description: A vector store is a collection of processed files can be used by the `file_search` tool. + description: The index of the content part in the item's content array. + transcript: + type: string + description: The final transcript of the audio. + description: 'Returned when the model-generated transcription of audio output is done streaming. Also emitted when a Response is interrupted, incomplete, or cancelled.' x-oaiMeta: - name: The vector store object - beta: true - example: "{\n \"id\": \"vs_123\",\n \"object\": \"vector_store\",\n \"created_at\": 1698107661,\n \"usage_bytes\": 123456,\n \"last_active_at\": 1698107661,\n \"name\": \"my_vector_store\",\n \"status\": \"completed\",\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 100,\n \"cancelled\": 0,\n \"failed\": 0,\n \"total\": 100\n },\n \"metadata\": {},\n \"last_used_at\": 1698107661\n}\n" - CreateVectorStoreRequest: + name: response.audio_transcript.done + group: realtime + example: "{\n \"event_id\": \"event_4748\",\n \"type\": \"response.audio_transcript.done\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_008\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"transcript\": \"Hello, how can I assist you today?\"\n}\n" + RealtimeServerEventResponseContentPartAdded: + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - part type: object properties: - file_ids: - maxItems: 500 - type: array - items: - type: string - description: 'A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files.' - name: + event_id: type: string - description: The name of the vector store. 
- expires_after: - $ref: '#/components/schemas/VectorStoreExpirationAfter' - chunking_strategy: - type: object - oneOf: - - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' - - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty.' - x-oaiExpandable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - UpdateVectorStoreRequest: - type: object - properties: - name: + description: The unique ID of the server event. + type: type: string - description: The name of the vector store. - nullable: true - expires_after: - $ref: '#/components/schemas/VectorStoreExpirationAfter' - metadata: + description: 'The event type, must be "response.content_part.added".' + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item to which the content part was added. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + part: type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - ListVectorStoresResponse: + properties: + type: + type: string + description: 'The content type ("text", "audio").' 
+ text: + type: string + description: The text content (if type is "text"). + audio: + type: string + description: Base64-encoded audio data (if type is "audio"). + transcript: + type: string + description: The transcript of the audio (if type is "audio"). + description: The content part that was added. + description: Returned when a new content part is added to an assistant message item during response generation. + x-oaiMeta: + name: response.content_part.added + group: realtime + example: "{\n \"event_id\": \"event_3738\",\n \"type\": \"response.content_part.added\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_007\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"part\": {\n \"type\": \"text\",\n \"text\": \"\"\n }\n}\n" + RealtimeServerEventResponseContentPartDone: required: - - object - - data - - first_id - - last_id - - has_more + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - part + type: object properties: - object: + event_id: type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/VectorStoreObject' - first_id: + description: The unique ID of the server event. + type: type: string - example: vs_abc123 - last_id: + description: 'The event type, must be "response.content_part.done".' + response_id: type: string - example: vs_abc456 - has_more: - type: boolean - example: false - DeleteVectorStoreResponse: + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + part: + type: object + properties: + type: + type: string + description: 'The content type ("text", "audio").' + text: + type: string + description: The text content (if type is "text"). + audio: + type: string + description: Base64-encoded audio data (if type is "audio"). 
+ transcript: + type: string + description: The transcript of the audio (if type is "audio"). + description: The content part that is done. + description: 'Returned when a content part is done streaming in an assistant message item. Also emitted when a Response is interrupted, incomplete, or cancelled.' + x-oaiMeta: + name: response.content_part.done + group: realtime + example: "{\n \"event_id\": \"event_3940\",\n \"type\": \"response.content_part.done\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_007\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"part\": {\n \"type\": \"text\",\n \"text\": \"Sure, I can help with that.\"\n }\n}\n" + RealtimeServerEventResponseCreated: required: - - id - - object - - deleted + - event_id + - type + - response type: object properties: - id: + event_id: type: string - deleted: - type: boolean - object: - enum: - - vector_store.deleted + description: The unique ID of the server event. + type: type: string - VectorStoreFileObject: - title: Vector store files + description: 'The event type, must be "response.created".' + response: + type: object + properties: + id: + type: string + description: The unique ID of the response. + object: + type: string + description: 'The object type, must be "realtime.response".' + status: + type: string + description: The status of the response ("in_progress"). + status_details: + type: object + description: Additional details about the status. + output: + type: array + items: + type: object + description: An item in the response output. + description: The list of output items generated by the response. + usage: + type: object + description: Usage statistics for the response. + description: The response resource. + description: 'Returned when a new Response is created. The first event of response creation, where the response is in an initial state of "in_progress".' 
+ x-oaiMeta: + name: response.created + group: realtime + example: "{\n \"event_id\": \"event_2930\",\n \"type\": \"response.created\",\n \"response\": {\n \"id\": \"resp_001\",\n \"object\": \"realtime.response\",\n \"status\": \"in_progress\",\n \"status_details\": null,\n \"output\": [],\n \"usage\": null\n }\n}\n" + RealtimeServerEventResponseDone: required: - - id - - object - - usage_bytes - - created_at - - vector_store_id - - status - - last_error + - event_id + - type + - response type: object properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - vector_store.file - type: string - description: 'The object type, which is always `vector_store.file`.' - usage_bytes: - type: integer - description: The total vector store usage in bytes. Note that this may be different from the original file size. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store file was created. - vector_store_id: + event_id: type: string - description: 'The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to.' - status: - enum: - - in_progress - - completed - - cancelled - - failed + description: The unique ID of the server event. + type: type: string - description: 'The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use.' - last_error: - required: - - code - - message + description: 'The event type, must be "response.done".' + response: type: object properties: - code: - enum: - - server_error - - unsupported_file - - invalid_file + id: type: string - description: One of `server_error` or `rate_limit_exceeded`. - message: + description: The unique ID of the response. + object: type: string - description: A human-readable description of the error. 
- description: The last error associated with this vector store file. Will be `null` if there are no errors. - nullable: true - chunking_strategy: - type: object - oneOf: - - $ref: '#/components/schemas/StaticChunkingStrategyResponseParam' - - $ref: '#/components/schemas/OtherChunkingStrategyResponseParam' - description: The strategy used to chunk the file. - x-oaiExpandable: true - description: A list of files attached to a vector store. + description: 'The object type, must be "realtime.response".' + status: + type: string + description: 'The final status of the response ("completed", "cancelled", "failed", "incomplete").' + status_details: + type: object + description: Additional details about the status. + output: + type: array + items: + type: object + description: An item in the response output. + description: The list of output items generated by the response. + usage: + type: object + description: Usage statistics for the response. + description: The response resource. + description: 'Returned when a Response is done streaming. Always emitted, no matter the final state.' 
x-oaiMeta: - name: The vector store file object - beta: true - example: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"usage_bytes\": 1234,\n \"created_at\": 1698107661,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"completed\",\n \"last_error\": null,\n \"chunking_strategy\": {\n \"type\": \"static\",\n \"static\": {\n \"max_chunk_size_tokens\": 800,\n \"chunk_overlap_tokens\": 400\n }\n }\n}\n" - OtherChunkingStrategyResponseParam: - title: Other Chunking Strategy + name: response.done + group: realtime + example: "{\n \"event_id\": \"event_3132\",\n \"type\": \"response.done\",\n \"response\": {\n \"id\": \"resp_001\",\n \"object\": \"realtime.response\",\n \"status\": \"completed\",\n \"status_details\": null,\n \"output\": [\n {\n \"id\": \"msg_006\",\n \"object\": \"realtime.item\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"Sure, how can I assist you today?\"\n }\n ]\n }\n ],\n \"usage\": {\n \"total_tokens\": 50,\n \"input_tokens\": 20,\n \"output_tokens\": 30\n }\n }\n}\n" + RealtimeServerEventResponseFunctionCallArgumentsDelta: required: + - event_id - type + - response_id + - item_id + - output_index + - call_id + - delta type: object properties: + event_id: + type: string + description: The unique ID of the server event. type: - enum: - - other type: string - description: Always `other`. - additionalProperties: false - description: 'This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API.' - StaticChunkingStrategyResponseParam: - title: Static Chunking Strategy + description: 'The event type, must be "response.function_call_arguments.delta".' + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the function call item. 
+ output_index: + type: integer + description: The index of the output item in the response. + call_id: + type: string + description: The ID of the function call. + delta: + type: string + description: The arguments delta as a JSON string. + description: Returned when the model-generated function call arguments are updated. + x-oaiMeta: + name: response.function_call_arguments.delta + group: realtime + example: "{\n \"event_id\": \"event_5354\",\n \"type\": \"response.function_call_arguments.delta\",\n \"response_id\": \"resp_002\",\n \"item_id\": \"fc_001\",\n \"output_index\": 0,\n \"call_id\": \"call_001\",\n \"delta\": \"{\\\"location\\\": \\\"San\\\"\"\n}\n" + RealtimeServerEventResponseFunctionCallArgumentsDone: required: + - event_id - type - - static + - response_id + - item_id + - output_index + - call_id + - arguments type: object properties: + event_id: + type: string + description: The unique ID of the server event. type: - enum: - - static type: string - description: Always `static`. - static: - $ref: '#/components/schemas/StaticChunkingStrategy' - additionalProperties: false - StaticChunkingStrategy: - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - type: object - properties: - max_chunk_size_tokens: - maximum: 4096 - minimum: 100 - type: integer - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: + description: 'The event type, must be "response.function_call_arguments.done".' + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the function call item. + output_index: type: integer - description: "The number of tokens that overlap between chunks. 
The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" - additionalProperties: false - AutoChunkingStrategyRequestParam: - title: Auto Chunking Strategy + description: The index of the output item in the response. + call_id: + type: string + description: The ID of the function call. + arguments: + type: string + description: The final arguments as a JSON string. + description: 'Returned when the model-generated function call arguments are done streaming. Also emitted when a Response is interrupted, incomplete, or cancelled.' + x-oaiMeta: + name: response.function_call_arguments.done + group: realtime + example: "{\n \"event_id\": \"event_5556\",\n \"type\": \"response.function_call_arguments.done\",\n \"response_id\": \"resp_002\",\n \"item_id\": \"fc_001\",\n \"output_index\": 0,\n \"call_id\": \"call_001\",\n \"arguments\": \"{\\\"location\\\": \\\"San Francisco\\\"}\"\n}\n" + RealtimeServerEventResponseOutputItemAdded: required: + - event_id - type + - response_id + - output_index + - item type: object properties: + event_id: + type: string + description: The unique ID of the server event. type: - enum: - - auto type: string - description: Always `auto`. - additionalProperties: false - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - StaticChunkingStrategyRequestParam: - title: Static Chunking Strategy + description: 'The event type, must be "response.output_item.added".' + response_id: + type: string + description: The ID of the response to which the item belongs. + output_index: + type: integer + description: The index of the output item in the response. + item: + type: object + properties: + id: + type: string + description: The unique ID of the item. + object: + type: string + description: 'The object type, must be "realtime.item".' 
+ type: + type: string + description: 'The type of the item ("message", "function_call", "function_call_output").' + status: + type: string + description: 'The status of the item ("in_progress", "completed").' + role: + type: string + description: The role associated with the item ("assistant"). + content: + type: array + items: + type: object + properties: + type: + type: string + description: 'The content type ("text", "audio").' + text: + type: string + description: The text content. + audio: + type: string + description: Base64-encoded audio data. + transcript: + type: string + description: The transcript of the audio. + description: The content of the item. + description: The item that was added. + description: Returned when a new Item is created during response generation. + x-oaiMeta: + name: response.output_item.added + group: realtime + example: "{\n \"event_id\": \"event_3334\",\n \"type\": \"response.output_item.added\",\n \"response_id\": \"resp_001\",\n \"output_index\": 0,\n \"item\": {\n \"id\": \"msg_007\",\n \"object\": \"realtime.item\",\n \"type\": \"message\",\n \"status\": \"in_progress\",\n \"role\": \"assistant\",\n \"content\": []\n }\n}\n" + RealtimeServerEventResponseOutputItemDone: required: + - event_id - type - - static + - response_id + - output_index + - item type: object properties: + event_id: + type: string + description: The unique ID of the server event. type: - enum: - - static type: string - description: Always `static`. - static: - $ref: '#/components/schemas/StaticChunkingStrategy' - additionalProperties: false - ChunkingStrategyRequestParam: - type: object - oneOf: - - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' - - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' 
- x-oaiExpandable: true - CreateVectorStoreFileRequest: + description: 'The event type, must be "response.output_item.done".' + response_id: + type: string + description: The ID of the response to which the item belongs. + output_index: + type: integer + description: The index of the output item in the response. + item: + type: object + properties: + id: + type: string + description: The unique ID of the item. + object: + type: string + description: 'The object type, must be "realtime.item".' + type: + type: string + description: 'The type of the item ("message", "function_call", "function_call_output").' + status: + type: string + description: 'The final status of the item ("completed", "incomplete").' + role: + type: string + description: The role associated with the item ("assistant"). + content: + type: array + items: + type: object + properties: + type: + type: string + description: 'The content type ("text", "audio").' + text: + type: string + description: The text content. + audio: + type: string + description: Base64-encoded audio data. + transcript: + type: string + description: The transcript of the audio. + description: The content of the item. + description: The completed item. + description: 'Returned when an Item is done streaming. Also emitted when a Response is interrupted, incomplete, or cancelled.' 
+ x-oaiMeta: + name: response.output_item.done + group: realtime + example: "{\n \"event_id\": \"event_3536\",\n \"type\": \"response.output_item.done\",\n \"response_id\": \"resp_001\",\n \"output_index\": 0,\n \"item\": {\n \"id\": \"msg_007\",\n \"object\": \"realtime.item\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"Sure, I can help with that.\"\n }\n ]\n }\n}\n" + RealtimeServerEventResponseTextDelta: required: - - file_id + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - delta type: object properties: - file_id: + event_id: type: string - description: 'A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files.' - chunking_strategy: - $ref: '#/components/schemas/ChunkingStrategyRequestParam' - additionalProperties: false - ListVectorStoreFilesResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: + description: The unique ID of the server event. + type: type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/VectorStoreFileObject' - first_id: + description: 'The event type, must be "response.text.delta".' + response_id: type: string - example: file-abc123 - last_id: + description: The ID of the response. + item_id: type: string - example: file-abc456 - has_more: - type: boolean - example: false - DeleteVectorStoreFileResponse: + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + delta: + type: string + description: The text delta. + description: Returned when the text value of a "text" content part is updated. 
+ x-oaiMeta: + name: response.text.delta + group: realtime + example: "{\n \"event_id\": \"event_4142\",\n \"type\": \"response.text.delta\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_007\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"delta\": \"Sure, I can h\"\n}\n" + RealtimeServerEventResponseTextDone: required: - - id - - object - - deleted + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - text type: object properties: - id: + event_id: type: string - deleted: - type: boolean - object: - enum: - - vector_store.file.deleted + description: The unique ID of the server event. + type: type: string - VectorStoreFileBatchObject: - title: Vector store file batch - required: - - id - - object - - created_at - - vector_store_id - - status - - file_counts - type: object - properties: - id: + description: 'The event type, must be "response.text.done".' + response_id: type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - vector_store.files_batch + description: The ID of the response. + item_id: type: string - description: 'The object type, which is always `vector_store.file_batch`.' - created_at: + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: type: integer - description: The Unix timestamp (in seconds) for when the vector store files batch was created. - vector_store_id: - type: string - description: 'The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to.' - status: - enum: - - in_progress - - completed - - cancelled - - failed + description: The index of the content part in the item's content array. + text: type: string - description: 'The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`.' 
- file_counts: - required: - - in_progress - - completed - - cancelled - - failed - - total - type: object - properties: - in_progress: - type: integer - description: The number of files that are currently being processed. - completed: - type: integer - description: The number of files that have been processed. - failed: - type: integer - description: The number of files that have failed to process. - cancelled: - type: integer - description: The number of files that where cancelled. - total: - type: integer - description: The total number of files. - description: A batch of files attached to a vector store. + description: The final text content. + description: 'Returned when the text value of a "text" content part is done streaming. Also emitted when a Response is interrupted, incomplete, or cancelled.' x-oaiMeta: - name: The vector store files batch object - beta: true - example: "{\n \"id\": \"vsfb_123\",\n \"object\": \"vector_store.files_batch\",\n \"created_at\": 1698107661,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"completed\",\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 100,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 100\n }\n}\n" - CreateVectorStoreFileBatchRequest: + name: response.text.done + group: realtime + example: "{\n \"event_id\": \"event_4344\",\n \"type\": \"response.text.done\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_007\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"text\": \"Sure, I can help with that.\"\n}\n" + RealtimeServerEventSessionCreated: required: - - file_ids + - event_id + - type + - session type: object properties: - file_ids: - maxItems: 500 - minItems: 1 - type: array - items: - type: string - description: 'A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files.' 
- chunking_strategy: - $ref: '#/components/schemas/ChunkingStrategyRequestParam' - additionalProperties: false - AssistantStreamEvent: - oneOf: - - $ref: '#/components/schemas/ThreadStreamEvent' - - $ref: '#/components/schemas/RunStreamEvent' - - $ref: '#/components/schemas/RunStepStreamEvent' - - $ref: '#/components/schemas/MessageStreamEvent' - - $ref: '#/components/schemas/ErrorEvent' - - $ref: '#/components/schemas/DoneEvent' - description: "Represents an event emitted when streaming a Run.\n\nEach event in a server-sent events stream has an `event` and `data` property:\n\n```\nevent: thread.created\ndata: {\"id\": \"thread_123\", \"object\": \"thread\", ...}\n```\n\nWe emit events whenever a new object is created, transitions to a new state, or is being\nstreamed in parts (deltas). For example, we emit `thread.run.created` when a new run\nis created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses\nto create a message during a run, we emit a `thread.message.created event`, a\n`thread.message.in_progress` event, many `thread.message.delta` events, and finally a\n`thread.message.completed` event.\n\nWe may add additional events over time, so we recommend handling unknown events gracefully\nin your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to\nintegrate the Assistants API with streaming.\n" - x-oaiMeta: - name: Assistant stream events - beta: true - ThreadStreamEvent: - oneOf: - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.created - type: string - data: - $ref: '#/components/schemas/ThreadObject' - description: 'Occurs when a new [thread](/docs/api-reference/threads/object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [thread](/docs/api-reference/threads/object)' - RunStreamEvent: - oneOf: - - required: - - event - - data + event_id: + type: string + description: The unique ID of the server event. 
+ type: + type: string + description: 'The event type, must be "session.created".' + session: type: object properties: - event: - enum: - - thread.run.created + id: type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a new [run](/docs/api-reference/runs/object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.queued + description: The unique ID of the session. + object: type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.in_progress + description: 'The object type, must be "realtime.session".' + model: type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.requires_action + description: The default model used for this session. + modalities: + type: array + items: + type: string + description: The set of modalities the model can respond with. + instructions: type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.completed + description: The default system instructions. 
+ voice: type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) is completed.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.incomplete + description: 'The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`.' + input_audio_format: type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.failed + description: The format of input audio. + output_audio_format: type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) fails.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.cancelling + description: The format of output audio. + input_audio_transcription: + type: object + properties: + enabled: + type: boolean + description: Whether input audio transcription is enabled. + model: + type: string + description: The model used for transcription. + description: Configuration for input audio transcription. + turn_detection: + type: object + properties: + type: + type: string + description: The type of turn detection ("server_vad" or "none"). + threshold: + type: number + description: Activation threshold for VAD. + prefix_padding_ms: + type: integer + description: Audio included before speech starts (in milliseconds). + silence_duration_ms: + type: integer + description: Duration of silence to detect speech stop (in milliseconds). 
+ description: Configuration for turn detection. + tools: + type: array + items: + type: object + properties: + type: + type: string + description: The type of the tool. + name: + type: string + description: The name of the function. + description: + type: string + description: The description of the function. + parameters: + type: object + description: Parameters of the function in JSON Schema. + description: Tools (functions) available to the model. + tool_choice: type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data + description: How the model chooses tools. + temperature: + type: number + description: Sampling temperature. + max_output_tokens: + oneOf: + - type: integer + - enum: + - inf + type: string + description: Maximum number of output tokens. + description: The session resource. + description: Returned when a session is created. Emitted automatically when a new connection is established. 
+ x-oaiMeta: + name: session.created + group: realtime + example: "{\n \"event_id\": \"event_1234\",\n \"type\": \"session.created\",\n \"session\": {\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-4o-realtime-preview-2024-10-01\",\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"\",\n \"voice\": \"alloy\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": null,\n \"turn_detection\": {\n \"type\": \"server_vad\",\n \"threshold\": 0.5,\n \"prefix_padding_ms\": 300,\n \"silence_duration_ms\": 200\n },\n \"tools\": [],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.8,\n \"max_output_tokens\": null\n }\n}\n" + RealtimeServerEventSessionUpdated: + required: + - event_id + - type + - session + type: object + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + description: 'The event type, must be "session.updated".' + session: type: object properties: - event: - enum: - - thread.run.cancelled + id: type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) is cancelled.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.expired + description: The unique ID of the session. + object: type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) expires.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - RunStepStreamEvent: - oneOf: - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.created + description: 'The object type, must be "realtime.session".' 
+ model: type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.in_progress + description: The default model used for this session. + modalities: + type: array + items: + type: string + description: The set of modalities the model can respond with. + instructions: type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.delta + description: The default system instructions. + voice: type: string - data: - $ref: '#/components/schemas/RunStepDeltaObject' - description: 'Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed.' - x-oaiMeta: - dataDescription: '`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.completed + description: 'The voice the model uses to respond - one of `alloy`, `echo`, or `shimmer`.' + input_audio_format: type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' - - required: - - event - - data + description: The format of input audio. + output_audio_format: + type: string + description: The format of output audio. 
+ input_audio_transcription: + type: object + properties: + enabled: + type: boolean + description: Whether input audio transcription is enabled. + model: + type: string + description: The model used for transcription. + description: Configuration for input audio transcription. + turn_detection: + type: object + properties: + type: + type: string + description: The type of turn detection ("server_vad" or "none"). + threshold: + type: number + description: Activation threshold for VAD. + prefix_padding_ms: + type: integer + description: Audio included before speech starts (in milliseconds). + silence_duration_ms: + type: integer + description: Duration of silence to detect speech stop (in milliseconds). + description: Configuration for turn detection. + tools: + type: array + items: + type: object + properties: + type: + type: string + description: The type of the tool. + name: + type: string + description: The name of the function. + description: + type: string + description: The description of the function. + parameters: + type: object + description: Parameters of the function in JSON Schema. + description: Tools (functions) available to the model. + tool_choice: + type: string + description: How the model chooses tools. + temperature: + type: number + description: Sampling temperature. + max_output_tokens: + oneOf: + - type: integer + - enum: + - inf + type: string + description: Maximum number of output tokens. + description: The updated session resource. + description: Returned when a session is updated. 
+ x-oaiMeta: + name: session.updated + group: realtime + example: "{\n \"event_id\": \"event_5678\",\n \"type\": \"session.updated\",\n \"session\": {\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-4o-realtime-preview-2024-10-01\",\n \"modalities\": [\"text\"],\n \"instructions\": \"New instructions\",\n \"voice\": \"alloy\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"enabled\": true,\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": {\n \"type\": \"none\"\n },\n \"tools\": [],\n \"tool_choice\": \"none\",\n \"temperature\": 0.7,\n \"max_output_tokens\": 200\n }\n}\n" + ResponseFormatJsonObject: + required: + - type + type: object + properties: + type: + enum: + - json_object + type: string + description: 'The type of response format being defined: `json_object`' + ResponseFormatJsonSchema: + required: + - type + - json_schema + type: object + properties: + type: + enum: + - json_schema + type: string + description: 'The type of response format being defined: `json_schema`' + json_schema: + required: + - type + - name type: object properties: - event: - enum: - - thread.run.step.failed + description: type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' - - required: - - event - - data + description: 'A description of what the response format is for, used by the model to determine how to respond in the format.' + name: + type: string + description: 'The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' + schema: + $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' + strict: + type: boolean + description: 'Whether to enable strict schema adherence when generating the output. 
If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs).' + default: false + nullable: true + ResponseFormatJsonSchemaSchema: + type: object + description: 'The schema for the response format, described as a JSON Schema object.' + ResponseFormatText: + required: + - type + type: object + properties: + type: + enum: + - text + type: string + description: 'The type of response format being defined: `text`' + RunCompletionUsage: + required: + - prompt_tokens + - completion_tokens + - total_tokens + type: object + properties: + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). + description: 'Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.).' + nullable: true + RunObject: + title: A run on a thread + required: + - id + - object + - created_at + - thread_id + - assistant_id + - status + - required_action + - last_error + - expires_at + - started_at + - cancelled_at + - failed_at + - completed_at + - model + - instructions + - tools + - metadata + - usage + - incomplete_details + - max_prompt_tokens + - max_completion_tokens + - truncation_strategy + - tool_choice + - parallel_tool_calls + - response_format + type: object + properties: + id: + type: string + description: 'The identifier, which can be referenced in API endpoints.' + object: + enum: + - thread.run + type: string + description: 'The object type, which is always `thread.run`.' 
+ created_at: + type: integer + description: The Unix timestamp (in seconds) for when the run was created. + thread_id: + type: string + description: 'The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run.' + assistant_id: + type: string + description: 'The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run.' + status: + enum: + - queued + - in_progress + - requires_action + - cancelling + - cancelled + - failed + - completed + - incomplete + - expired + type: string + description: 'The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`.' + required_action: + required: + - type + - submit_tool_outputs type: object properties: - event: + type: enum: - - thread.run.step.cancelled + - submit_tool_outputs type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' - - required: - - event - - data + description: 'For now, this is always `submit_tool_outputs`.' + submit_tool_outputs: + required: + - tool_calls + type: object + properties: + tool_calls: + type: array + items: + $ref: '#/components/schemas/RunToolCallObject' + description: A list of the relevant tool calls. + description: Details on the tool outputs needed for this run to continue. + description: Details on the action required to continue the run. Will be `null` if no action is required. 
+ nullable: true + last_error: + required: + - code + - message type: object properties: - event: + code: enum: - - thread.run.step.expired + - server_error + - rate_limit_exceeded + - invalid_prompt type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' - MessageStreamEvent: - oneOf: - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.message.created + description: 'One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.' + message: type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - - required: - - event - - data + description: A human-readable description of the error. + description: The last error associated with this run. Will be `null` if there are no errors. + nullable: true + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the run will expire. + nullable: true + started_at: + type: integer + description: The Unix timestamp (in seconds) for when the run was started. + nullable: true + cancelled_at: + type: integer + description: The Unix timestamp (in seconds) for when the run was cancelled. + nullable: true + failed_at: + type: integer + description: The Unix timestamp (in seconds) for when the run failed. + nullable: true + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the run was completed. 
+ nullable: true + incomplete_details: type: object properties: - event: + reason: enum: - - thread.message.in_progress + - max_completion_tokens + - max_prompt_tokens type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state.' - x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - - required: - - event - - data + description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. + description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. + nullable: true + model: + type: string + description: 'The model that the [assistant](/docs/api-reference/assistants) used for this run.' + instructions: + type: string + description: 'The instructions that the [assistant](/docs/api-reference/assistants) used for this run.' + tools: + maxItems: 20 + type: array + items: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearch' + - $ref: '#/components/schemas/AssistantToolsFunction' + x-oaiExpandable: true + description: 'The list of tools that the [assistant](/docs/api-reference/assistants) used for this run.' + metadata: type: object - properties: - event: - enum: - - thread.message.delta - type: string - data: - $ref: '#/components/schemas/MessageDeltaObject' - description: 'Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed.' - x-oaiMeta: - dataDescription: '`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)' - - required: - - event - - data + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + usage: + $ref: '#/components/schemas/RunCompletionUsage' + temperature: + type: number + description: 'The sampling temperature used for this run. If not set, defaults to 1.' + nullable: true + top_p: + type: number + description: 'The nucleus sampling value used for this run. If not set, defaults to 1.' + nullable: true + max_prompt_tokens: + minimum: 256 + type: integer + description: "The maximum number of prompt tokens specified to have been used over the course of the run.\n" + nullable: true + max_completion_tokens: + minimum: 256 + type: integer + description: "The maximum number of completion tokens specified to have been used over the course of the run.\n" + nullable: true + truncation_strategy: + $ref: '#/components/schemas/TruncationObject' + tool_choice: + $ref: '#/components/schemas/AssistantsApiToolChoiceOption' + parallel_tool_calls: + $ref: '#/components/schemas/ParallelToolCalls' + response_format: + $ref: '#/components/schemas/AssistantsApiResponseFormatOption' + description: 'Represents an execution run on a [thread](/docs/api-reference/threads).' 
+ x-oaiMeta: + name: The run object + beta: true + example: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1698107661,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699073476,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699073498,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [{\"type\": \"file_search\"}, {\"type\": \"code_interpreter\"}],\n \"metadata\": {},\n \"incomplete_details\": null,\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" + RunStepCompletionUsage: + required: + - prompt_tokens + - completion_tokens + - total_tokens + type: object + properties: + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run step. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). + description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. + nullable: true + RunStepDeltaObject: + title: Run step delta object + required: + - id + - object + - delta + type: object + properties: + id: + type: string + description: 'The identifier of the run step, which can be referenced in API endpoints.' + object: + enum: + - thread.run.step.delta + type: string + description: 'The object type, which is always `thread.run.step.delta`.' 
+ delta: type: object properties: - event: - enum: - - thread.message.completed - type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) is completed.' - x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - - required: - - event - - data + step_details: + type: object + oneOf: + - $ref: '#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject' + - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsObject' + description: The details of the run step. + x-oaiExpandable: true + description: The delta containing the fields that have changed on the run step. + description: "Represents a run step delta i.e. any changed fields on a run step during streaming.\n" + x-oaiMeta: + name: The run step delta object + beta: true + example: "{\n \"id\": \"step_123\",\n \"object\": \"thread.run.step.delta\",\n \"delta\": {\n \"step_details\": {\n \"type\": \"tool_calls\",\n \"tool_calls\": [\n {\n \"index\": 0,\n \"id\": \"call_123\",\n \"type\": \"code_interpreter\",\n \"code_interpreter\": { \"input\": \"\", \"outputs\": [] }\n }\n ]\n }\n }\n}\n" + RunStepDeltaStepDetailsMessageCreationObject: + title: Message creation + required: + - type + type: object + properties: + type: + enum: + - message_creation + type: string + description: Always `message_creation`. + message_creation: type: object properties: - event: - enum: - - thread.message.incomplete + message_id: type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed.' - x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - ErrorEvent: + description: The ID of the message that was created by this run step. + description: Details of the message creation by the run step. 
+ RunStepDeltaStepDetailsToolCallsCodeObject: + title: Code interpreter tool call required: - - event - - data + - index + - type type: object properties: - event: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call. + type: enum: - - error + - code_interpreter type: string - data: - $ref: '#/components/schemas/Error' - description: 'Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout.' - x-oaiMeta: - dataDescription: '`data` is an [error](/docs/guides/error-codes/api-errors)' - DoneEvent: + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + code_interpreter: + type: object + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + type: array + items: + type: object + oneOf: + - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' + - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' + x-oaiExpandable: true + description: 'The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type.' + description: The Code Interpreter tool call definition. + description: Details of the Code Interpreter tool call the run step was involved in. + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: + title: Code interpreter image output required: - - event - - data + - index + - type type: object properties: - event: + index: + type: integer + description: The index of the output in the outputs array. + type: enum: - - done + - image type: string - data: + description: Always `image`. 
+ image: + type: object + properties: + file_id: + type: string + description: 'The [file](/docs/api-reference/files) ID of the image.' + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: + title: Code interpreter log output + required: + - index + - type + type: object + properties: + index: + type: integer + description: The index of the output in the outputs array. + type: enum: - - '[DONE]' + - logs type: string - description: Occurs when a stream ends. - x-oaiMeta: - dataDescription: '`data` is `[DONE]`' - Batch: + description: Always `logs`. + logs: + type: string + description: The text output from the Code Interpreter tool call. + description: Text output from the Code Interpreter tool call as part of a run step. + RunStepDeltaStepDetailsToolCallsFileSearchObject: + title: File search tool call required: - - id - - object - - endpoint - - input_file_id - - completion_window - - status - - created_at + - index + - type + - file_search type: object properties: + index: + type: integer + description: The index of the tool call in the tool calls array. id: type: string - object: + description: The ID of the tool call object. + type: enum: - - batch - type: string - description: 'The object type, which is always `batch`.' - endpoint: + - file_search type: string - description: The OpenAI API endpoint used by the batch. - errors: + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + file_search: type: object - properties: - object: - type: string - description: 'The object type, which is always `list`.' - data: - type: array - items: - type: object - properties: - code: - type: string - description: An error code identifying the error type. - message: - type: string - description: A human-readable message providing more details about the error. - param: - type: string - description: 'The name of the parameter that caused the error, if applicable.' 
- nullable: true - line: - type: integer - description: 'The line number of the input file where the error occurred, if applicable.' - nullable: true - input_file_id: - type: string - description: The ID of the input file for the batch. - completion_window: + description: 'For now, this is always going to be an empty object.' + x-oaiTypeLabel: map + RunStepDeltaStepDetailsToolCallsFunctionObject: + title: Function tool call + required: + - index + - type + type: object + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: type: string - description: The time frame within which the batch should be processed. - status: + description: The ID of the tool call object. + type: enum: - - validating - - failed - - in_progress - - finalizing - - completed - - expired - - cancelling - - cancelled - type: string - description: The current status of the batch. - output_file_id: - type: string - description: The ID of the file containing the outputs of successfully executed requests. - error_file_id: + - function type: string - description: The ID of the file containing the outputs of requests with errors. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was created. - in_progress_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started processing. - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch will expire. - finalizing_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started finalizing. - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was completed. - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch failed. - expired_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch expired. 
- cancelling_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started cancelling. - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was cancelled. - request_counts: - required: - - total - - completed - - failed + description: The type of tool call. This is always going to be `function` for this type of tool call. + function: type: object properties: - total: - type: integer - description: Total number of requests in the batch. - completed: - type: integer - description: Number of requests that have been completed successfully. - failed: - type: integer - description: Number of requests that have failed. - description: The request counts for different statuses within the batch. - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - x-oaiMeta: - name: The batch object - example: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" - BatchRequestInput: + name: + type: string + description: The name of the function. 
+ arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: 'The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet.' + nullable: true + description: The definition of the function that was called. + RunStepDeltaStepDetailsToolCallsObject: + title: Tool calls + required: + - type + type: object + properties: + type: + enum: + - tool_calls + type: string + description: Always `tool_calls`. + tool_calls: + type: array + items: + oneOf: + - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject' + - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject' + - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject' + x-oaiExpandable: true + description: "An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`.\n" + description: Details of the tool call. + RunStepDetailsMessageCreationObject: + title: Message creation + required: + - type + - message_creation type: object properties: - custom_id: - type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. - method: + type: enum: - - POST - type: string - description: The HTTP method to be used for the request. Currently only `POST` is supported. - url: + - message_creation type: string - description: 'The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.' 
- description: The per-line object of the batch input file - x-oaiMeta: - name: The request input object - example: "{\"custom_id\": \"request-1\", \"method\": \"POST\", \"url\": \"/v1/chat/completions\", \"body\": {\"model\": \"gpt-4o-mini\", \"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant.\"}, {\"role\": \"user\", \"content\": \"What is 2+2?\"}]}}\n" - BatchRequestOutput: + description: Always `message_creation`. + message_creation: + required: + - message_id + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + description: Details of the message creation by the run step. + RunStepDetailsToolCallsCodeObject: + title: Code Interpreter tool call + required: + - id + - type + - code_interpreter type: object properties: id: type: string - custom_id: + description: The ID of the tool call. + type: + enum: + - code_interpreter type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. - response: + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + code_interpreter: + required: + - input + - outputs type: object properties: - status_code: - type: integer - description: The HTTP status code of the response - request_id: + input: type: string - description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. - body: - type: object - description: The JSON body of the response - x-oaiTypeLabel: map - nullable: true - error: + description: The input to the Code Interpreter tool call. + outputs: + type: array + items: + type: object + oneOf: + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject' + x-oaiExpandable: true + description: 'The outputs from the Code Interpreter tool call. 
Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type.' + description: The Code Interpreter tool call definition. + description: Details of the Code Interpreter tool call the run step was involved in. + RunStepDetailsToolCallsCodeOutputImageObject: + title: Code Interpreter image output + required: + - type + - image + type: object + properties: + type: + enum: + - image + type: string + description: Always `image`. + image: + required: + - file_id type: object properties: - code: - type: string - description: A machine-readable error code. - message: + file_id: type: string - description: A human-readable error message. - description: 'For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure.' - nullable: true - description: The per-line object of the batch output and error files - x-oaiMeta: - name: The request output object - example: "{\"id\": \"batch_req_wnaDys\", \"custom_id\": \"request-2\", \"response\": {\"status_code\": 200, \"request_id\": \"req_c187b3\", \"body\": {\"id\": \"chatcmpl-9758Iw\", \"object\": \"chat.completion\", \"created\": 1711475054, \"model\": \"gpt-4o-mini\", \"choices\": [{\"index\": 0, \"message\": {\"role\": \"assistant\", \"content\": \"2 + 2 equals 4.\"}, \"finish_reason\": \"stop\"}], \"usage\": {\"prompt_tokens\": 24, \"completion_tokens\": 15, \"total_tokens\": 39}, \"system_fingerprint\": null}}, \"error\": null}\n" - ListBatchesResponse: + description: 'The [file](/docs/api-reference/files) ID of the image.' + RunStepDetailsToolCallsCodeOutputLogsObject: + title: Code Interpreter log output required: - - object - - data - - has_more + - type + - logs type: object properties: - data: - type: array - items: - $ref: '#/components/schemas/Batch' - first_id: + type: + enum: + - logs type: string - example: batch_abc123 - last_id: + description: Always `logs`. 
+ logs: type: string - example: batch_abc456 - has_more: - type: boolean - object: + description: The text output from the Code Interpreter tool call. + description: Text output from the Code Interpreter tool call as part of a run step. + RunStepDetailsToolCallsFileSearchObject: + title: File search tool call + required: + - id + - type + - file_search + type: object + properties: + id: + type: string + description: The ID of the tool call object. + type: enum: - - list + - file_search type: string - AuditLogActorServiceAccount: + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + file_search: + type: object + properties: + ranking_options: + $ref: '#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject' + results: + type: array + items: + $ref: '#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject' + description: The results of the file search. + description: 'For now, this is always going to be an empty object.' + x-oaiTypeLabel: map + RunStepDetailsToolCallsFileSearchRankingOptionsObject: + title: File search tool call ranking options + required: + - ranker + - score_threshold type: object properties: - id: + ranker: + enum: + - default_2024_08_21 type: string - description: The service account id. - description: The service account that performed the audit logged action. - AuditLogActorUser: + description: The ranker used for the file search. + score_threshold: + maximum: 1 + minimum: 0 + type: number + description: The score threshold for the file search. All values must be a floating point number between 0 and 1. + description: The ranking options for the file search. + RunStepDetailsToolCallsFileSearchResultObject: + title: File search tool call result + required: + - file_id + - file_name + - score type: object properties: - id: + file_id: type: string - description: The user id. - email: + description: The ID of the file that result was found in. 
+ file_name: type: string - description: The user email. - description: The user who performed the audit logged action. - AuditLogActorApiKey: + description: The name of the file that result was found in. + score: + maximum: 1 + minimum: 0 + type: number + description: The score of the result. All values must be a floating point number between 0 and 1. + content: + type: array + items: + type: object + properties: + type: + enum: + - text + type: string + description: The type of the content. + text: + type: string + description: The text content of the file. + description: The content of the result that was found. The content is only included if requested via the include query parameter. + description: A result instance of the file search. + x-oaiTypeLabel: map + RunStepDetailsToolCallsFunctionObject: + title: Function tool call + required: + - id + - type + - function type: object properties: id: type: string - description: The tracking id of the API key. + description: The ID of the tool call object. type: enum: - - user - - service_account - type: string - description: The type of API key. Can be either `user` or `service_account`. - user: - $ref: '#/components/schemas/AuditLogActorUser' - service_account: - $ref: '#/components/schemas/AuditLogActorServiceAccount' - description: The API Key used to perform the audit logged action. - AuditLogActorSession: - type: object - properties: - user: - $ref: '#/components/schemas/AuditLogActorUser' - ip_address: + - function type: string - description: The IP address from which the action was performed. - description: The session in which the audit logged action was performed. - AuditLogActor: + description: The type of tool call. This is always going to be `function` for this type of tool call. + function: + required: + - name + - arguments + - output + type: object + properties: + name: + type: string + description: The name of the function. 
+ arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: 'The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet.' + nullable: true + description: The definition of the function that was called. + RunStepDetailsToolCallsObject: + title: Tool calls + required: + - type + - tool_calls type: object properties: type: enum: - - session - - api_key + - tool_calls type: string - description: The type of actor. Is either `session` or `api_key`. - session: - $ref: '#/components/schemas/AuditLogActorSession' - api_key: - $ref: '#/components/schemas/AuditLogActorApiKey' - description: The actor who performed the audit logged action. - AuditLogEventType: - enum: - - api_key.created - - api_key.updated - - api_key.deleted - - invite.sent - - invite.accepted - - invite.deleted - - login.succeeded - - login.failed - - logout.succeeded - - logout.failed - - organization.updated - - project.created - - project.updated - - project.archived - - service_account.created - - service_account.updated - - service_account.deleted - - user.added - - user.updated - - user.deleted - type: string - description: The event type. - x-oaiExpandable: true - AuditLog: + description: Always `tool_calls`. + tool_calls: + type: array + items: + oneOf: + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsFileSearchObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsFunctionObject' + x-oaiExpandable: true + description: "An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`.\n" + description: Details of the tool call. 
+ RunStepObject: + title: Run steps required: - id + - object + - created_at + - assistant_id + - thread_id + - run_id - type - - effective_at - - actor + - status + - step_details + - last_error + - cancelled_at + - failed_at + - completed_at + - usage type: object properties: id: type: string - description: The ID of this log. - type: - $ref: '#/components/schemas/AuditLogEventType' - effective_at: + description: 'The identifier of the run step, which can be referenced in API endpoints.' + object: + enum: + - thread.run.step + type: string + description: 'The object type, which is always `thread.run.step`.' + created_at: type: integer - description: The Unix timestamp (in seconds) of the event. - project: + description: The Unix timestamp (in seconds) for when the run step was created. + assistant_id: + type: string + description: 'The ID of the [assistant](/docs/api-reference/assistants) associated with the run step.' + thread_id: + type: string + description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' + run_id: + type: string + description: 'The ID of the [run](/docs/api-reference/runs) that this run step is a part of.' + type: + enum: + - message_creation + - tool_calls + type: string + description: 'The type of run step, which can be either `message_creation` or `tool_calls`.' + status: + enum: + - in_progress + - cancelled + - failed + - completed + - expired + type: string + description: 'The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`.' + step_details: type: object - properties: - id: - type: string - description: The project ID. - name: - type: string - description: The project title. - description: The project that the action was scoped to. Absent for actions not scoped to projects. 
- actor: - $ref: '#/components/schemas/AuditLogActor' - api_key.created: + oneOf: + - $ref: '#/components/schemas/RunStepDetailsMessageCreationObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsObject' + description: The details of the run step. + x-oaiExpandable: true + last_error: + required: + - code + - message type: object properties: - id: + code: + enum: + - server_error + - rate_limit_exceeded type: string - description: The tracking ID of the API key. - data: - type: object - properties: - scopes: - type: array - items: - type: string - description: 'A list of scopes allowed for the API key, e.g. `["api.model.request"]`' - description: The payload used to create the API key. - description: The details for events with this `type`. - api_key.updated: - type: object - properties: - id: + description: One of `server_error` or `rate_limit_exceeded`. + message: type: string - description: The tracking ID of the API key. - changes_requested: - type: object - properties: - scopes: - type: array - items: - type: string - description: 'A list of scopes allowed for the API key, e.g. `["api.model.request"]`' - description: The payload used to update the API key. - description: The details for events with this `type`. - api_key.deleted: + description: A human-readable description of the error. + description: The last error associated with this run step. Will be `null` if there are no errors. + nullable: true + expired_at: + type: integer + description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. + nullable: true + cancelled_at: + type: integer + description: The Unix timestamp (in seconds) for when the run step was cancelled. + nullable: true + failed_at: + type: integer + description: The Unix timestamp (in seconds) for when the run step failed. + nullable: true + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the run step completed. 
+ nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + usage: + $ref: '#/components/schemas/RunStepCompletionUsage' + description: "Represents a step in execution of a run.\n" + x-oaiMeta: + name: The run step object + beta: true + example: "{\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n}\n" + RunStepStreamEvent: + oneOf: + - required: + - event + - data type: object properties: - id: + event: + enum: + - thread.run.step.created type: string - description: The tracking ID of the API key. - description: The details for events with this `type`. - invite.sent: + data: + $ref: '#/components/schemas/RunStepObject' + description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created.' + x-oaiMeta: + dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' + - required: + - event + - data type: object properties: - id: + event: + enum: + - thread.run.step.in_progress type: string - description: The ID of the invite. data: - type: object - properties: - email: - type: string - description: The email invited to the organization. 
- role: - type: string - description: The role the email was invited to be. Is either `owner` or `member`. - description: The payload used to create the invite. - description: The details for events with this `type`. - invite.accepted: + $ref: '#/components/schemas/RunStepObject' + description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state.' + x-oaiMeta: + dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' + - required: + - event + - data type: object properties: - id: + event: + enum: + - thread.run.step.delta type: string - description: The ID of the invite. - description: The details for events with this `type`. - invite.deleted: + data: + $ref: '#/components/schemas/RunStepDeltaObject' + description: 'Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed.' + x-oaiMeta: + dataDescription: '`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)' + - required: + - event + - data type: object properties: - id: + event: + enum: + - thread.run.step.completed type: string - description: The ID of the invite. - description: The details for events with this `type`. - login.failed: + data: + $ref: '#/components/schemas/RunStepObject' + description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed.' + x-oaiMeta: + dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' + - required: + - event + - data type: object properties: - error_code: - type: string - description: The error code of the failure. - error_message: + event: + enum: + - thread.run.step.failed type: string - description: The error message of the failure. - description: The details for events with this `type`. - logout.failed: + data: + $ref: '#/components/schemas/RunStepObject' + description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails.' 
+ x-oaiMeta: + dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' + - required: + - event + - data type: object properties: - error_code: - type: string - description: The error code of the failure. - error_message: + event: + enum: + - thread.run.step.cancelled type: string - description: The error message of the failure. - description: The details for events with this `type`. - organization.updated: + data: + $ref: '#/components/schemas/RunStepObject' + description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled.' + x-oaiMeta: + dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' + - required: + - event + - data type: object properties: - id: + event: + enum: + - thread.run.step.expired type: string - description: The organization ID. - changes_requested: - type: object - properties: - title: - type: string - description: The organization title. - description: - type: string - description: The organization description. - name: - type: string - description: The organization name. - settings: - type: object - properties: - threads_ui_visibility: - type: string - description: 'Visibility of the threads page which shows messages created with the Assistants API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`.' - usage_dashboard_visibility: - type: string - description: Visibility of the usage dashboard which shows activity and costs for your organization. One of `ANY_ROLE` or `OWNERS`. - description: The payload used to update the organization settings. - description: The details for events with this `type`. - project.created: + data: + $ref: '#/components/schemas/RunStepObject' + description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires.' 
+ x-oaiMeta: + dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' + RunStreamEvent: + oneOf: + - required: + - event + - data type: object properties: - id: + event: + enum: + - thread.run.created type: string - description: The project ID. data: - type: object - properties: - name: - type: string - description: The project name. - title: - type: string - description: The title of the project as seen on the dashboard. - description: The payload used to create the project. - description: The details for events with this `type`. - project.updated: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a new [run](/docs/api-reference/runs/object) is created.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data type: object properties: - id: + event: + enum: + - thread.run.queued type: string - description: The project ID. - changes_requested: - type: object - properties: - title: - type: string - description: The title of the project as seen on the dashboard. - description: The payload used to update the project. - description: The details for events with this `type`. - project.archived: + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data type: object properties: - id: + event: + enum: + - thread.run.in_progress type: string - description: The project ID. - description: The details for events with this `type`. - service_account.created: + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status.' 
+ x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data type: object properties: - id: + event: + enum: + - thread.run.requires_action type: string - description: The service account ID. data: - type: object - properties: - role: - type: string - description: The role of the service account. Is either `owner` or `member`. - description: The payload used to create the service account. - description: The details for events with this `type`. - service_account.updated: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data type: object properties: - id: + event: + enum: + - thread.run.completed type: string - description: The service account ID. - changes_requested: - type: object - properties: - role: - type: string - description: The role of the service account. Is either `owner` or `member`. - description: The payload used to updated the service account. - description: The details for events with this `type`. - service_account.deleted: + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) is completed.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data type: object properties: - id: + event: + enum: + - thread.run.incomplete type: string - description: The service account ID. - description: The details for events with this `type`. - user.added: + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`.' 
+ x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data type: object properties: - id: + event: + enum: + - thread.run.failed type: string - description: The user ID. data: - type: object - properties: - role: - type: string - description: The role of the user. Is either `owner` or `member`. - description: The payload used to add the user to the project. - description: The details for events with this `type`. - user.updated: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) fails.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data type: object properties: - id: + event: + enum: + - thread.run.cancelling type: string - description: The project ID. - changes_requested: - type: object - properties: - role: - type: string - description: The role of the user. Is either `owner` or `member`. - description: The payload used to update the user. - description: The details for events with this `type`. - user.deleted: + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data type: object properties: - id: + event: + enum: + - thread.run.cancelled type: string - description: The user ID. - description: The details for events with this `type`. - description: A log of a user action or configuration change within this organization. 
- x-oaiMeta: - name: The audit log object - example: "{\n \"id\": \"req_xxx_20240101\",\n \"type\": \"api_key.created\",\n \"effective_at\": 1720804090,\n \"actor\": {\n \"type\": \"session\",\n \"session\": {\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n },\n \"ip_address\": \"127.0.0.1\",\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n },\n \"api_key.created\": {\n \"id\": \"key_xxxx\",\n \"data\": {\n \"scopes\": [\"resource.operation\"]\n }\n }\n}\n" - ListAuditLogsResponse: - required: - - object - - data - - first_id - - last_id - - has_more - type: object - properties: - object: - enum: - - list - type: string - data: - type: array - items: - $ref: '#/components/schemas/AuditLog' - first_id: - type: string - example: audit_log-defb456h8dks - last_id: - type: string - example: audit_log-hnbkd8s93s - has_more: - type: boolean - Invite: + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) is cancelled.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.expired + type: string + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) expires.' 
+ x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + RunToolCallObject: required: - - object - id - - email - - role - - status - - invited_at - - expires_at + - type + - function type: object properties: - object: - enum: - - organization.invite - type: string - description: 'The object type, which is always `organization.invite`' id: type: string - description: 'The identifier, which can be referenced in API endpoints' - email: - type: string - description: The email address of the individual to whom the invite was sent - role: - enum: - - owner - - reader - type: string - description: '`owner` or `reader`' - status: - enum: - - accepted - - expired - - pending - type: string - description: '`accepted`,`expired`, or `pending`' - invited_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite was sent. - expires_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite expires. - accepted_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite was accepted. - description: Represents an individual `invite` to the organization. 
- x-oaiMeta: - name: The invite object - example: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n}\n" - InviteListResponse: - required: - - object - - data - type: object - properties: - object: - enum: - - list - type: string - description: 'The object type, which is always `list`' - data: - type: array - items: - $ref: '#/components/schemas/Invite' - first_id: - type: string - description: The first `invite_id` in the retrieved `list` - last_id: - type: string - description: The last `invite_id` in the retrieved `list` - has_more: - type: boolean - description: The `has_more` property is used for pagination to indicate there are additional results. - InviteRequest: - required: - - email - - role - type: object - properties: - email: - type: string - description: Send an email to this address - role: - enum: - - reader - - owner - type: string - description: '`owner` or `reader`' - InviteDeleteResponse: - required: - - object - - id - - deleted - type: object - properties: - object: + description: 'The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint.' + type: enum: - - organization.invite.deleted - type: string - description: 'The object type, which is always `organization.invite.deleted`' - id: + - function type: string - deleted: - type: boolean - User: + description: 'The type of tool call the output is required for. For now, this is always `function`.' + function: + required: + - name + - arguments + type: object + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments that the model expects you to pass to the function. + description: The function definition. 
+ description: Tool call objects + StaticChunkingStrategy: required: - - object - - id - - name - - email - - role - - added_at + - max_chunk_size_tokens + - chunk_overlap_tokens type: object properties: - object: - enum: - - organization.user - type: string - description: 'The object type, which is always `organization.user`' - id: - type: string - description: 'The identifier, which can be referenced in API endpoints' - name: - type: string - description: The name of the user - email: - type: string - description: The email address of the user - role: - enum: - - owner - - reader - type: string - description: '`owner` or `reader`' - added_at: + max_chunk_size_tokens: + maximum: 4096 + minimum: 100 type: integer - description: The Unix timestamp (in seconds) of when the user was added. - description: Represents an individual `user` within an organization. - x-oaiMeta: - name: The user object - example: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - UserListResponse: - required: - - object - - data - - first_id - - last_id - - has_more - type: object - properties: - object: - enum: - - list - type: string - data: - type: array - items: - $ref: '#/components/schemas/User' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - UserRoleUpdateRequest: + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: "The number of tokens that overlap between chunks. 
The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" + additionalProperties: false + StaticChunkingStrategyRequestParam: + title: Static Chunking Strategy required: - - role + - type + - static type: object properties: - role: + type: enum: - - owner - - reader + - static type: string - description: '`owner` or `reader`' - UserDeleteResponse: + description: Always `static`. + static: + $ref: '#/components/schemas/StaticChunkingStrategy' + additionalProperties: false + StaticChunkingStrategyResponseParam: + title: Static Chunking Strategy required: - - object - - id - - deleted + - type + - static type: object properties: - object: + type: enum: - - organization.user.deleted - type: string - id: + - static type: string - deleted: + description: Always `static`. + static: + $ref: '#/components/schemas/StaticChunkingStrategy' + additionalProperties: false + SubmitToolOutputsRunRequest: + required: + - tool_outputs + type: object + properties: + tool_outputs: + type: array + items: + type: object + properties: + tool_call_id: + type: string + description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. + output: + type: string + description: The output of the tool call to be submitted to continue the run. + description: A list of tools for which the outputs are being submitted. + stream: type: boolean - Project: + description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" + nullable: true + additionalProperties: false + ThreadObject: + title: Thread required: - id - object - - name - created_at - - status + - tool_resources + - metadata type: object properties: id: type: string - description: 'The identifier, which can be referenced in API endpoints' + description: 'The identifier, which can be referenced in API endpoints.' 
object: enum: - - organization.project - type: string - description: 'The object type, which is always `organization.project`' - name: + - thread type: string - description: The name of the project. This appears in reporting. + description: 'The object type, which is always `thread`.' created_at: type: integer - description: The Unix timestamp (in seconds) of when the project was created. - archived_at: - type: integer - description: The Unix timestamp (in seconds) of when the project was archived or `null`. + description: The Unix timestamp (in seconds) for when the thread was created. + tool_resources: + type: object + properties: + code_interpreter: + type: object + properties: + file_ids: + maxItems: 20 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" + file_search: + type: object + properties: + vector_store_ids: + maxItems: 1 + type: array + items: + type: string + description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" + description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" nullable: true - status: - enum: - - active - - archived - type: string - description: '`active` or `archived`' - description: Represents an individual project. + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + description: 'Represents a thread that contains [messages](/docs/api-reference/messages).' x-oaiMeta: - name: The project object - example: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" - ProjectListResponse: + name: The thread object + beta: true + example: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1698107661,\n \"metadata\": {}\n}\n" + ThreadStreamEvent: + oneOf: + - required: + - event + - data + type: object + properties: + enabled: + type: boolean + description: Whether to enable input audio transcription. + event: + enum: + - thread.created + type: string + data: + $ref: '#/components/schemas/ThreadObject' + description: 'Occurs when a new [thread](/docs/api-reference/threads/object) is created.' + x-oaiMeta: + dataDescription: '`data` is a [thread](/docs/api-reference/threads/object)' + TranscriptionSegment: required: - - object - - data - - first_id - - last_id - - has_more + - id + - seek + - start + - end + - text + - tokens + - temperature + - avg_logprob + - compression_ratio + - no_speech_prob type: object properties: - object: - enum: - - list + id: + type: integer + description: Unique identifier of the segment. + seek: + type: integer + description: Seek offset of the segment. + start: + type: number + description: Start time of the segment in seconds. + format: float + end: + type: number + description: End time of the segment in seconds. + format: float + text: type: string - data: + description: Text content of the segment. + tokens: type: array items: - $ref: '#/components/schemas/Project' - first_id: - type: string - last_id: + type: integer + description: Array of token IDs for the text content. 
+ temperature: + type: number + description: Temperature parameter used for generating the segment. + format: float + avg_logprob: + type: number + description: 'Average logprob of the segment. If the value is lower than -1, consider the logprobs failed.' + format: float + compression_ratio: + type: number + description: 'Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed.' + format: float + no_speech_prob: + type: number + description: 'Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent.' + format: float + TranscriptionWord: + required: + - word + - start + - end + type: object + properties: + word: type: string - has_more: - type: boolean - ProjectCreateRequest: + description: The text content of the word. + start: + type: number + description: Start time of the word in seconds. + format: float + end: + type: number + description: End time of the word in seconds. + format: float + TruncationObject: + title: Thread Truncation Controls required: - - name + - type + type: object + properties: + type: + enum: + - auto + - last_messages + type: string + description: 'The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`.' + last_messages: + minimum: 1 + type: integer + description: The number of most recent messages from the thread when constructing the context for the run. + nullable: true + description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. + UpdateVectorStoreRequest: type: object properties: name: type: string - description: 'The friendly name of the project, this name appears in reports.' 
- ProjectUpdateRequest: - required: - - name - type: object - properties: - name: + description: The name of the vector store. + nullable: true + expires_after: + $ref: '#/components/schemas/VectorStoreExpirationAfter' + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + additionalProperties: false + Upload: + title: Upload + required: + - bytes + - created_at + - expires_at + - filename + - id + - purpose + - status + type: object + properties: + id: + type: string + description: 'The Upload unique identifier, which can be referenced in API endpoints.' + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload was created. + filename: + type: string + description: The name of the file to be uploaded. + bytes: + type: integer + description: The intended number of bytes to be uploaded. + purpose: + type: string + description: 'The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values.' + status: + enum: + - pending + - completed + - cancelled + - expired + type: string + description: The status of the Upload. + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload was created. + object: + enum: + - upload type: string - description: 'The updated name of the project, this name appears in reports.' - DefaultProjectErrorResponse: + description: 'The object type, which is always "upload".' 
+ file: + $ref: '#/components/schemas/OpenAIFile' + description: "The Upload object can accept byte chunks in the form of Parts.\n" + x-oaiMeta: + name: The upload object + example: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"completed\",\n \"expires_at\": 1719127296,\n \"file\": {\n \"id\": \"file-xyz321\",\n \"object\": \"file\",\n \"bytes\": 2147483648,\n \"created_at\": 1719186911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n}\n" + UploadPart: + title: UploadPart required: - - code - - message + - created_at + - id + - object + - upload_id type: object properties: - code: + id: + type: string + description: 'The upload Part unique identifier, which can be referenced in API endpoints.' + created_at: type: integer - message: + description: The Unix timestamp (in seconds) for when the Part was created. + upload_id: type: string - ProjectUser: + description: The ID of the Upload object that this Part was added to. + object: + enum: + - upload.part + type: string + description: 'The object type, which is always `upload.part`.' 
+ description: "The upload Part represents a chunk of bytes we can add to an Upload object.\n" + x-oaiMeta: + name: The upload part object + example: "{\n \"id\": \"part_def456\",\n \"object\": \"upload.part\",\n \"created_at\": 1719186911,\n \"upload_id\": \"upload_abc123\"\n}\n" + User: required: - object - id @@ -10730,9 +12423,9 @@ components: properties: object: enum: - - organization.project.user + - organization.user type: string - description: 'The object type, which is always `organization.project.user`' + description: 'The object type, which is always `organization.user`' id: type: string description: 'The identifier, which can be referenced in API endpoints' @@ -10745,64 +12438,17 @@ components: role: enum: - owner - - member + - reader type: string - description: '`owner` or `member`' + description: '`owner` or `reader`' added_at: type: integer - description: The Unix timestamp (in seconds) of when the project was added. - description: Represents an individual user in a project. + description: The Unix timestamp (in seconds) of when the user was added. + description: Represents an individual `user` within an organization. x-oaiMeta: - name: The project user object - example: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - ProjectUserListResponse: - required: - - object - - data - - first_id - - last_id - - has_more - type: object - properties: - object: - type: string - data: - type: array - items: - $ref: '#/components/schemas/ProjectUser' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - ProjectUserCreateRequest: - required: - - user_id - - role - type: object - properties: - user_id: - type: string - description: The ID of the user. 
- role: - enum: - - owner - - member - type: string - description: '`owner` or `member`' - ProjectUserUpdateRequest: - required: - - role - type: object - properties: - role: - enum: - - owner - - member - type: string - description: '`owner` or `member`' - ProjectUserDeleteResponse: + name: The user object + example: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + UserDeleteResponse: required: - object - id @@ -10811,46 +12457,13 @@ components: properties: object: enum: - - organization.project.user.deleted + - organization.user.deleted type: string id: type: string deleted: type: boolean - ProjectServiceAccount: - required: - - object - - id - - name - - role - - created_at - type: object - properties: - object: - enum: - - organization.project.service_account - type: string - description: 'The object type, which is always `organization.project.service_account`' - id: - type: string - description: 'The identifier, which can be referenced in API endpoints' - name: - type: string - description: The name of the service account - role: - enum: - - owner - - member - type: string - description: '`owner` or `member`' - created_at: - type: integer - description: The Unix timestamp (in seconds) of when the service account was created - description: Represents an individual service account in a project. 
- x-oaiMeta: - name: The project service account object - example: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n}\n" - ProjectServiceAccountListResponse: + UserListResponse: required: - object - data @@ -10866,167 +12479,253 @@ components: data: type: array items: - $ref: '#/components/schemas/ProjectServiceAccount' + $ref: '#/components/schemas/User' first_id: type: string last_id: type: string has_more: type: boolean - ProjectServiceAccountCreateRequest: + UserRoleUpdateRequest: required: - - name + - role type: object properties: - name: + role: + enum: + - owner + - reader type: string - description: The name of the service account being created. - ProjectServiceAccountCreateResponse: + description: '`owner` or `reader`' + VectorStoreExpirationAfter: + title: Vector store expiration policy required: - - object - - id - - name - - role - - created_at - - api_key + - anchor + - days type: object properties: - object: + anchor: enum: - - organization.project.service_account + - last_active_at type: string + description: 'Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`.' + days: + maximum: 365 + minimum: 1 + type: integer + description: The number of days after the anchor time that the vector store will expire. + description: The expiration policy for a vector store. + VectorStoreFileBatchObject: + title: Vector store file batch + required: + - id + - object + - created_at + - vector_store_id + - status + - file_counts + type: object + properties: id: type: string - name: - type: string - role: + description: 'The identifier, which can be referenced in API endpoints.' + object: enum: - - member + - vector_store.files_batch type: string - description: Service accounts can only have one role of type `member` + description: 'The object type, which is always `vector_store.file_batch`.' 
created_at: type: integer - api_key: - $ref: '#/components/schemas/ProjectServiceAccountApiKey' - ProjectServiceAccountApiKey: + description: The Unix timestamp (in seconds) for when the vector store files batch was created. + vector_store_id: + type: string + description: 'The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to.' + status: + enum: + - in_progress + - completed + - cancelled + - failed + type: string + description: 'The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`.' + file_counts: + required: + - in_progress + - completed + - cancelled + - failed + - total + type: object + properties: + in_progress: + type: integer + description: The number of files that are currently being processed. + completed: + type: integer + description: The number of files that have been processed. + failed: + type: integer + description: The number of files that have failed to process. + cancelled: + type: integer + description: The number of files that where cancelled. + total: + type: integer + description: The total number of files. + description: A batch of files attached to a vector store. 
+ x-oaiMeta: + name: The vector store files batch object + beta: true + example: "{\n \"id\": \"vsfb_123\",\n \"object\": \"vector_store.files_batch\",\n \"created_at\": 1698107661,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"completed\",\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 100,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 100\n }\n}\n" + VectorStoreFileObject: + title: Vector store files required: - - object - - value - - name - - created_at - id - type: object - properties: - object: - enum: - - organization.project.service_account.api_key - type: string - description: 'The object type, which is always `organization.project.service_account.api_key`' - value: - type: string - name: - type: string - created_at: - type: integer - id: - type: string - ProjectServiceAccountDeleteResponse: - required: - object - - id - - deleted + - usage_bytes + - created_at + - vector_store_id + - status + - last_error type: object properties: - object: - enum: - - organization.project.service_account.deleted - type: string id: type: string - deleted: - type: boolean - ProjectApiKey: - required: - - object - - redacted_value - - name - - created_at - - id - - owner - type: object - properties: + description: 'The identifier, which can be referenced in API endpoints.' object: enum: - - organization.project.api_key - type: string - description: 'The object type, which is always `organization.project.api_key`' - redacted_value: - type: string - description: The redacted value of the API key - name: + - vector_store.file type: string - description: The name of the API key + description: 'The object type, which is always `vector_store.file`.' + usage_bytes: + type: integer + description: The total vector store usage in bytes. Note that this may be different from the original file size. 
created_at: type: integer - description: The Unix timestamp (in seconds) of when the API key was created - id: + description: The Unix timestamp (in seconds) for when the vector store file was created. + vector_store_id: type: string - description: 'The identifier, which can be referenced in API endpoints' - owner: + description: 'The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to.' + status: + enum: + - in_progress + - completed + - cancelled + - failed + type: string + description: 'The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use.' + last_error: + required: + - code + - message type: object properties: - type: + code: enum: - - user - - service_account + - server_error + - unsupported_file + - invalid_file type: string - description: '`user` or `service_account`' - user: - $ref: '#/components/schemas/ProjectUser' - service_account: - $ref: '#/components/schemas/ProjectServiceAccount' - description: Represents an individual API key in a project. + description: One of `server_error` or `rate_limit_exceeded`. + message: + type: string + description: A human-readable description of the error. + description: The last error associated with this vector store file. Will be `null` if there are no errors. + nullable: true + chunking_strategy: + type: object + oneOf: + - $ref: '#/components/schemas/StaticChunkingStrategyResponseParam' + - $ref: '#/components/schemas/OtherChunkingStrategyResponseParam' + description: The strategy used to chunk the file. + x-oaiExpandable: true + description: A list of files attached to a vector store. 
x-oaiMeta: - name: The project API key object - example: "{\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n }\n }\n}\n" - ProjectApiKeyListResponse: + name: The vector store file object + beta: true + example: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"usage_bytes\": 1234,\n \"created_at\": 1698107661,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"completed\",\n \"last_error\": null,\n \"chunking_strategy\": {\n \"type\": \"static\",\n \"static\": {\n \"max_chunk_size_tokens\": 800,\n \"chunk_overlap_tokens\": 400\n }\n }\n}\n" + VectorStoreObject: + title: Vector store required: + - id - object - - data - - first_id - - last_id - - has_more + - usage_bytes + - created_at + - status + - last_active_at + - name + - file_counts + - metadata type: object properties: + id: + type: string + description: 'The identifier, which can be referenced in API endpoints.' object: enum: - - list - type: string - data: - type: array - items: - $ref: '#/components/schemas/ProjectApiKey' - first_id: + - vector_store type: string - last_id: + description: 'The object type, which is always `vector_store`.' + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the vector store was created. + name: type: string - has_more: - type: boolean - ProjectApiKeyDeleteResponse: - required: - - object - - id - - deleted - type: object - properties: - object: + description: The name of the vector store. + usage_bytes: + type: integer + description: The total number of bytes used by the files in the vector store. 
+ file_counts: + required: + - in_progress + - completed + - failed + - cancelled + - total + type: object + properties: + in_progress: + type: integer + description: The number of files that are currently being processed. + completed: + type: integer + description: The number of files that have been successfully processed. + failed: + type: integer + description: The number of files that have failed to process. + cancelled: + type: integer + description: The number of files that were cancelled. + total: + type: integer + description: The total number of files. + status: enum: - - organization.project.api_key.deleted - type: string - id: + - expired + - in_progress + - completed type: string - deleted: - type: boolean + description: 'The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use.' + expires_after: + $ref: '#/components/schemas/VectorStoreExpirationAfter' + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the vector store will expire. + nullable: true + last_active_at: + type: integer + description: The Unix timestamp (in seconds) for when the vector store was last active. + nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + description: A vector store is a collection of processed files can be used by the `file_search` tool. 
+ x-oaiMeta: + name: The vector store object + beta: true + example: "{\n \"id\": \"vs_123\",\n \"object\": \"vector_store\",\n \"created_at\": 1698107661,\n \"usage_bytes\": 123456,\n \"last_active_at\": 1698107661,\n \"name\": \"my_vector_store\",\n \"status\": \"completed\",\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 100,\n \"cancelled\": 0,\n \"failed\": 0,\n \"total\": 100\n },\n \"metadata\": {},\n \"last_used_at\": 1698107661\n}\n" RealtimeServerEventType: enum: - error @@ -12671,8 +14370,12 @@ x-oaiMeta: title: Endpoints - id: assistants title: Assistants + beta: true - id: administration title: Administration + - id: realtime + title: Realtime + beta: true - id: legacy title: Legacy groups: @@ -12698,7 +14401,7 @@ x-oaiMeta: path: verbose-json-object - id: chat title: Chat - description: "Given a list of messages comprising a conversation, the model will return a response.\n\nRelated guide: [Chat Completions](/docs/guides/text-generation)\n" + description: "Given a list of messages comprising a conversation, the model will return a response.\nRelated guide: [Chat Completions](/docs/guides/text-generation)\n" navigationGroup: endpoints sections: - type: endpoint @@ -12712,7 +14415,7 @@ x-oaiMeta: path: streaming - id: embeddings title: Embeddings - description: "Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.\n\nRelated guide: [Embeddings](/docs/guides/embeddings)\n" + description: "Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.\nRelated guide: [Embeddings](/docs/guides/embeddings)\n" navigationGroup: endpoints sections: - type: endpoint @@ -12723,7 +14426,7 @@ x-oaiMeta: path: object - id: fine-tuning title: Fine-tuning - description: "Manage fine-tuning jobs to tailor a model to your specific training data.\n\nRelated guide: [Fine-tune models](/docs/guides/fine-tuning)\n" + description: "Manage 
fine-tuning jobs to tailor a model to your specific training data.\nRelated guide: [Fine-tune models](/docs/guides/fine-tuning)\n" navigationGroup: endpoints sections: - type: endpoint @@ -12761,7 +14464,7 @@ x-oaiMeta: path: checkpoint-object - id: batch title: Batch - description: "Create large batches of API requests for asynchronous processing. The Batch API returns completions within 24 hours for a 50% discount.\n\nRelated guide: [Batch](/docs/guides/batch)\n" + description: "Create large batches of API requests for asynchronous processing. The Batch API returns completions within 24 hours for a 50% discount.\nRelated guide: [Batch](/docs/guides/batch)\n" navigationGroup: endpoints sections: - type: endpoint @@ -12833,7 +14536,7 @@ x-oaiMeta: path: part-object - id: images title: Images - description: "Given a prompt and/or an input image, the model will generate a new image.\n\nRelated guide: [Image generation](/docs/guides/images)\n" + description: "Given a prompt and/or an input image, the model will generate a new image.\nRelated guide: [Image generation](/docs/guides/images)\n" navigationGroup: endpoints sections: - type: endpoint @@ -12867,7 +14570,7 @@ x-oaiMeta: path: object - id: moderations title: Moderations - description: "Given text and/or image inputs, classifies if those inputs are potentially harmful across several categories.\n\nRelated guide: [Moderations](/docs/guides/moderation)\n" + description: "Given text and/or image inputs, classifies if those inputs are potentially harmful across several categories.\nRelated guide: [Moderations](/docs/guides/moderation)\n" navigationGroup: endpoints sections: - type: endpoint @@ -12976,7 +14679,7 @@ x-oaiMeta: key: RunObject path: object - id: run-steps - title: Run Steps + title: Run steps beta: true description: "Represents the steps (model and tool calls) taken during the run.\n\nRelated guide: [Assistants](/docs/assistants/overview)\n" navigationGroup: assistants @@ -12991,7 +14694,7 @@ x-oaiMeta: 
key: RunStepObject path: step-object - id: vector-stores - title: Vector Stores + title: Vector stores beta: true description: "Vector stores are used to store files for use by the `file_search` tool.\n\nRelated guide: [File Search](/docs/assistants/tools/file-search)\n" navigationGroup: assistants @@ -13015,7 +14718,7 @@ x-oaiMeta: key: VectorStoreObject path: object - id: vector-stores-files - title: Vector Store Files + title: Vector store files beta: true description: "Vector store files represent files inside a vector store.\n\nRelated guide: [File Search](/docs/assistants/tools/file-search)\n" navigationGroup: assistants @@ -13036,9 +14739,9 @@ x-oaiMeta: key: VectorStoreFileObject path: file-object - id: vector-stores-file-batches - title: Vector Store File Batches + title: Vector store file batches beta: true - description: "Vector store file batches represent operations to add multiple files to a vector store.\n\nRelated guide: [File Search](/docs/assistants/tools/file-search)\n" + description: "Vector store file batches represent operations to add multiple files to a vector store.\nRelated guide: [File Search](/docs/assistants/tools/file-search)\n" navigationGroup: assistants sections: - type: endpoint @@ -13059,7 +14762,7 @@ x-oaiMeta: - id: assistants-streaming title: Streaming beta: true - description: "Stream the result of executing a Run or resuming a Run after submitting tool outputs.\n\nYou can stream events from the [Create Thread and Run](/docs/api-reference/runs/createThreadAndRun),\n[Create Run](/docs/api-reference/runs/createRun), and [Submit Tool Outputs](/docs/api-reference/runs/submitToolOutputs)\nendpoints by passing `\"stream\": true`. The response will be a [Server-Sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) stream.\n\nOur Node and Python SDKs provide helpful utilities to make streaming easy. 
Reference the\n[Assistants API quickstart](/docs/assistants/overview) to learn more.\n" + description: "Stream the result of executing a Run or resuming a Run after submitting tool outputs.\nYou can stream events from the [Create Thread and Run](/docs/api-reference/runs/createThreadAndRun),\n[Create Run](/docs/api-reference/runs/createRun), and [Submit Tool Outputs](/docs/api-reference/runs/submitToolOutputs)\nendpoints by passing `\"stream\": true`. The response will be a [Server-Sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) stream.\nOur Node and Python SDKs provide helpful utilities to make streaming easy. Reference the\n[Assistants API quickstart](/docs/assistants/overview) to learn more.\n" navigationGroup: assistants sections: - type: object @@ -13072,8 +14775,8 @@ x-oaiMeta: key: AssistantStreamEvent path: events - id: administration - title: Overview - description: "Programmatically manage your organization. \n\nThe Audit Logs endpoint provides a log of all actions taken in the \norganization for security and monitoring purposes.\n\nTo access these endpoints please generate an Admin API Key through the [API Platform Organization overview](/organization/admin-keys). Admin API keys cannot be used for non-administration endpoints.\n\nFor best practices on setting up your organization, please refer to this [guide](/docs/guides/production-best-practices/setting-up-your-organization)\n" + title: Administration + description: "Programmatically manage your organization. \nThe Audit Logs endpoint provides a log of all actions taken in the organization for security and monitoring purposes.\nTo access these endpoints please generate an Admin API Key through the [API Platform Organization overview](/organization/admin-keys). 
Admin API keys cannot be used for non-administration endpoints.\nFor best practices on setting up your organization, please refer to this [guide](/docs/guides/production-best-practices/setting-up-your-organization)\n" navigationGroup: administration - id: invite title: Invites @@ -13117,7 +14820,7 @@ x-oaiMeta: path: object - id: projects title: Projects - description: "Manage the projects within an orgnanization includes creation, updating, and archiving or projects. \nThe Default project cannot be modified or archived. \n" + description: "Manage the projects within an organization, including creation, updating, and archiving of projects. \nThe Default project cannot be modified or archived.\n" navigationGroup: administration sections: - type: endpoint @@ -13139,8 +14842,8 @@ x-oaiMeta: key: Project path: object - id: project-users - title: Project Users - description: "Manage users within a project, including adding, updating roles, and removing users. \nUsers cannot be removed from the Default project, unless they are being removed from the organization. \n" + title: Project users + description: "Manage users within a project, including adding, updating roles, and removing users. \nUsers cannot be removed from the Default project, unless they are being removed from the organization.\n" navigationGroup: administration sections: - type: endpoint @@ -13162,7 +14865,7 @@ x-oaiMeta: key: ProjectUser path: object - id: project-service-accounts - title: Project Service Accounts + title: Project service accounts description: "Manage service accounts within a project. A service account is a bot user that is not associated with a user. \nIf a user leaves an organization, their keys and membership in projects will no longer work. Service accounts \ndo not have this limitation. 
However, service accounts can also be deleted from a project.\n" navigationGroup: administration sections: @@ -13182,8 +14885,8 @@ x-oaiMeta: key: ProjectServiceAccount path: object - id: project-api-keys - title: Project API Keys - description: "Manage API keys for a given project. Supports listing and deleting keys for users. \nThis API does not allow issuing keys for users, as users need to authorize themselves to generate keys. \n" + title: Project API keys + description: "Manage API keys for a given project. Supports listing and deleting keys for users. \nThis API does not allow issuing keys for users, as users need to authorize themselves to generate keys.\n" navigationGroup: administration sections: - type: endpoint @@ -13199,8 +14902,8 @@ x-oaiMeta: key: ProjectApiKey path: object - id: audit-logs - title: Audit Logs - description: "Logs of user actions and configuration changes within this organization. \n\nTo log events, you must activate logging in the [Organization Settings](/settings/organization/general). \nOnce activated, for security reasons, logging cannot be deactivated.\n" + title: Audit logs + description: "Logs of user actions and configuration changes within this organization. \nTo log events, you must activate logging in the [Organization Settings](/settings/organization/general). 
\nOnce activated, for security reasons, logging cannot be deactivated.\n" navigationGroup: administration sections: - type: endpoint @@ -13209,6 +14912,132 @@ x-oaiMeta: - type: object key: AuditLog path: object + - id: realtime + title: Realtime + beta: true + description: "Communicate with a GPT-4o class model live, in real time, over WebSocket.\nProduces both audio and text transcriptions.\n[Learn more about the Realtime API](/docs/guides/realtime).\n" + navigationGroup: realtime + - id: realtime-client-events + title: Client events + description: "These are events that the OpenAI Realtime WebSocket server will accept from the client.\n" + navigationGroup: realtime + sections: + - type: object + key: RealtimeClientEventSessionUpdate + path: + - type: object + key: RealtimeClientEventInputAudioBufferAppend + path: + - type: object + key: RealtimeClientEventInputAudioBufferCommit + path: + - type: object + key: RealtimeClientEventInputAudioBufferClear + path: + - type: object + key: RealtimeClientEventConversationItemCreate + path: + - type: object + key: RealtimeClientEventConversationItemTruncate + path: + - type: object + key: RealtimeClientEventConversationItemDelete + path: + - type: object + key: RealtimeClientEventResponseCreate + path: + - type: object + key: RealtimeClientEventResponseCancel + path: + - id: realtime-server-events + title: Server events + description: "These are events emitted from the OpenAI Realtime WebSocket server to the client.\n" + navigationGroup: realtime + sections: + - type: object + key: RealtimeServerEventError + path: + - type: object + key: RealtimeServerEventSessionCreated + path: + - type: object + key: RealtimeServerEventSessionUpdated + path: + - type: object + key: RealtimeServerEventConversationCreated + path: + - type: object + key: RealtimeServerEventConversationItemCreated + path: + - type: object + key: RealtimeServerEventConversationItemInputAudioTranscriptionCompleted + path: + - type: object + key: 
RealtimeServerEventConversationItemInputAudioTranscriptionFailed + path: + - type: object + key: RealtimeServerEventConversationItemTruncated + path: + - type: object + key: RealtimeServerEventConversationItemDeleted + path: + - type: object + key: RealtimeServerEventInputAudioBufferCommitted + path: + - type: object + key: RealtimeServerEventInputAudioBufferCleared + path: + - type: object + key: RealtimeServerEventInputAudioBufferSpeechStarted + path: + - type: object + key: RealtimeServerEventInputAudioBufferSpeechStopped + path: + - type: object + key: RealtimeServerEventResponseCreated + path: + - type: object + key: RealtimeServerEventResponseDone + path: + - type: object + key: RealtimeServerEventResponseOutputItemAdded + path: + - type: object + key: RealtimeServerEventResponseOutputItemDone + path: + - type: object + key: RealtimeServerEventResponseContentPartAdded + path: + - type: object + key: RealtimeServerEventResponseContentPartDone + path: + - type: object + key: RealtimeServerEventResponseTextDelta + path: + - type: object + key: RealtimeServerEventResponseTextDone + path: + - type: object + key: RealtimeServerEventResponseAudioTranscriptDelta + path: + - type: object + key: RealtimeServerEventResponseAudioTranscriptDone + path: + - type: object + key: RealtimeServerEventResponseAudioDelta + path: + - type: object + key: RealtimeServerEventResponseAudioDone + path: + - type: object + key: RealtimeServerEventResponseFunctionCallArgumentsDelta + path: + - type: object + key: RealtimeServerEventResponseFunctionCallArgumentsDone + path: + - type: object + key: RealtimeServerEventRateLimitsUpdated + path: - id: completions title: Completions legacy: true