diff --git a/src/libs/OpenAI/Generated/OpenAI.Models.UsageCompletionsResult.g.cs b/src/libs/OpenAI/Generated/OpenAI.Models.UsageCompletionsResult.g.cs
index 122c29c..48cb433 100644
--- a/src/libs/OpenAI/Generated/OpenAI.Models.UsageCompletionsResult.g.cs
+++ b/src/libs/OpenAI/Generated/OpenAI.Models.UsageCompletionsResult.g.cs
@@ -16,25 +16,37 @@ public sealed partial class UsageCompletionsResult
public global::OpenAI.UsageCompletionsResultObject Object { get; set; }
///
- /// The aggregated number of input tokens used. For customers subscribe to scale tier, this includes scale tier tokens.
+ /// The aggregated number of text input tokens used, including cached tokens. For customers subscribed to scale tier, this includes scale tier tokens.
///
[global::System.Text.Json.Serialization.JsonPropertyName("input_tokens")]
[global::System.Text.Json.Serialization.JsonRequired]
public required int InputTokens { get; set; }
///
- /// The aggregated number of input tokens that has been cached from previous requests. For customers subscribe to scale tier, this includes scale tier tokens.
+ /// The aggregated number of text input tokens that have been cached from previous requests. For customers subscribed to scale tier, this includes scale tier tokens.
///
[global::System.Text.Json.Serialization.JsonPropertyName("input_cached_tokens")]
public int? InputCachedTokens { get; set; }
///
- /// The aggregated number of output tokens used. For customers subscribe to scale tier, this includes scale tier tokens.
+ /// The aggregated number of text output tokens used. For customers subscribed to scale tier, this includes scale tier tokens.
///
[global::System.Text.Json.Serialization.JsonPropertyName("output_tokens")]
[global::System.Text.Json.Serialization.JsonRequired]
public required int OutputTokens { get; set; }
+ ///
+ /// The aggregated number of audio input tokens used, including cached tokens.
+ ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_tokens")]
+ public int? InputAudioTokens { get; set; }
+
+ ///
+ /// The aggregated number of audio output tokens used.
+ ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("output_audio_tokens")]
+ public int? OutputAudioTokens { get; set; }
+
///
/// The count of requests made to the model.
///
@@ -83,13 +95,19 @@ public sealed partial class UsageCompletionsResult
///
///
///
- /// The aggregated number of input tokens used. For customers subscribe to scale tier, this includes scale tier tokens.
+ /// The aggregated number of text input tokens used, including cached tokens. For customers subscribed to scale tier, this includes scale tier tokens.
///
///
- /// The aggregated number of input tokens that has been cached from previous requests. For customers subscribe to scale tier, this includes scale tier tokens.
+ /// The aggregated number of text input tokens that have been cached from previous requests. For customers subscribed to scale tier, this includes scale tier tokens.
///
///
- /// The aggregated number of output tokens used. For customers subscribe to scale tier, this includes scale tier tokens.
+ /// The aggregated number of text output tokens used. For customers subscribed to scale tier, this includes scale tier tokens.
+ ///
+ ///
+ /// The aggregated number of audio input tokens used, including cached tokens.
+ ///
+ ///
+ /// The aggregated number of audio output tokens used.
///
///
/// The count of requests made to the model.
@@ -116,6 +134,8 @@ public UsageCompletionsResult(
int numModelRequests,
global::OpenAI.UsageCompletionsResultObject @object,
int? inputCachedTokens,
+ int? inputAudioTokens,
+ int? outputAudioTokens,
string? projectId,
string? userId,
string? apiKeyId,
@@ -127,6 +147,8 @@ public UsageCompletionsResult(
this.NumModelRequests = numModelRequests;
this.Object = @object;
this.InputCachedTokens = inputCachedTokens;
+ this.InputAudioTokens = inputAudioTokens;
+ this.OutputAudioTokens = outputAudioTokens;
this.ProjectId = projectId;
this.UserId = userId;
this.ApiKeyId = apiKeyId;
diff --git a/src/libs/OpenAI/openapi.yaml b/src/libs/OpenAI/openapi.yaml
index 72e33b6..f85285e 100644
--- a/src/libs/OpenAI/openapi.yaml
+++ b/src/libs/OpenAI/openapi.yaml
@@ -2551,7 +2551,7 @@ paths:
examples:
request:
curl: "curl \"https://api.openai.com/v1/organization/usage/completions?start_time=1730419200&limit=1\" \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\"\n"
- response: "{\n \"object\": \"page\",\n \"data\": [\n {\n \"object\": \"bucket\",\n \"start_time\": 1730419200,\n \"end_time\": 1730505600,\n \"results\": [\n {\n \"object\": \"organization.usage.completions.result\",\n \"input_tokens\": 1000,\n \"output_tokens\": 500,\n \"input_cached_tokens\": 800,\n \"num_model_requests\": 5,\n \"project_id\": null,\n \"user_id\": null,\n \"api_key_id\": null,\n \"model\": null,\n \"batch\": null\n }\n ]\n }\n ],\n \"has_more\": true,\n \"next_page\": \"AAAAAGdGxdEiJdKOAAAAAGcqsYA=\"\n}\n"
+ response: "{\n \"object\": \"page\",\n \"data\": [\n {\n \"object\": \"bucket\",\n \"start_time\": 1730419200,\n \"end_time\": 1730505600,\n \"results\": [\n {\n \"object\": \"organization.usage.completions.result\",\n \"input_tokens\": 1000,\n \"output_tokens\": 500,\n \"input_cached_tokens\": 800,\n \"input_audio_tokens\": 0,\n \"output_audio_tokens\": 0,\n \"num_model_requests\": 5,\n \"project_id\": null,\n \"user_id\": null,\n \"api_key_id\": null,\n \"model\": null,\n \"batch\": null\n }\n ]\n }\n ],\n \"has_more\": true,\n \"next_page\": \"page_AAAAAGdGxdEiJdKOAAAAAGcqsYA=\"\n}\n"
/organization/usage/embeddings:
get:
tags:
@@ -13464,13 +13464,19 @@ components:
type: string
input_tokens:
type: integer
- description: 'The aggregated number of input tokens used. For customers subscribe to scale tier, this includes scale tier tokens.'
+ description: 'The aggregated number of text input tokens used, including cached tokens. For customers subscribed to scale tier, this includes scale tier tokens.'
input_cached_tokens:
type: integer
- description: 'The aggregated number of input tokens that has been cached from previous requests. For customers subscribe to scale tier, this includes scale tier tokens.'
+ description: 'The aggregated number of text input tokens that have been cached from previous requests. For customers subscribed to scale tier, this includes scale tier tokens.'
output_tokens:
type: integer
- description: 'The aggregated number of output tokens used. For customers subscribe to scale tier, this includes scale tier tokens.'
+ description: 'The aggregated number of text output tokens used. For customers subscribed to scale tier, this includes scale tier tokens.'
+ input_audio_tokens:
+ type: integer
+ description: 'The aggregated number of audio input tokens used, including cached tokens.'
+ output_audio_tokens:
+ type: integer
+ description: The aggregated number of audio output tokens used.
num_model_requests:
type: integer
description: The count of requests made to the model.
@@ -13497,7 +13503,7 @@ components:
description: The aggregated completions usage details of the specific time bucket.
x-oaiMeta:
name: Completions usage object
- example: "{\n \"object\": \"organization.usage.completions.result\",\n \"input_tokens\": 5000,\n \"output_tokens\": 1000,\n \"input_cached_tokens\": 4000,\n \"num_model_requests\": 5,\n \"project_id\": \"proj_abc\",\n \"user_id\": \"user-abc\",\n \"api_key_id\": \"key_abc\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"batch\": false\n}\n"
+ example: "{\n \"object\": \"organization.usage.completions.result\",\n \"input_tokens\": 5000,\n \"output_tokens\": 1000,\n \"input_cached_tokens\": 4000,\n \"input_audio_tokens\": 300,\n \"output_audio_tokens\": 200,\n \"num_model_requests\": 5,\n \"project_id\": \"proj_abc\",\n \"user_id\": \"user-abc\",\n \"api_key_id\": \"key_abc\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"batch\": false\n}\n"
UsageEmbeddingsResult:
required:
- object