Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: No changes made in the pull request. #139

Merged
merged 1 commit into from
Dec 17, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -16,25 +16,37 @@ public sealed partial class UsageCompletionsResult
public global::OpenAI.UsageCompletionsResultObject Object { get; set; }

/// <summary>
/// The aggregated number of input tokens used. For customers subscribe to scale tier, this includes scale tier tokens.
/// The aggregated number of text input tokens used, including cached tokens. For customers subscribed to scale tier, this includes scale tier tokens.
/// </summary>
[global::System.Text.Json.Serialization.JsonPropertyName("input_tokens")]
[global::System.Text.Json.Serialization.JsonRequired]
public required int InputTokens { get; set; }

/// <summary>
/// The aggregated number of input tokens that has been cached from previous requests. For customers subscribe to scale tier, this includes scale tier tokens.
/// The aggregated number of text input tokens that have been cached from previous requests. For customers subscribed to scale tier, this includes scale tier tokens.
/// </summary>
[global::System.Text.Json.Serialization.JsonPropertyName("input_cached_tokens")]
public int? InputCachedTokens { get; set; }

/// <summary>
/// The aggregated number of output tokens used. For customers subscribe to scale tier, this includes scale tier tokens.
/// The aggregated number of text output tokens used. For customers subscribed to scale tier, this includes scale tier tokens.
/// </summary>
[global::System.Text.Json.Serialization.JsonPropertyName("output_tokens")]
[global::System.Text.Json.Serialization.JsonRequired]
public required int OutputTokens { get; set; }

/// <summary>
/// The aggregated number of audio input tokens used, including cached tokens.
/// </summary>
[global::System.Text.Json.Serialization.JsonPropertyName("input_audio_tokens")]
public int? InputAudioTokens { get; set; }

/// <summary>
/// The aggregated number of audio output tokens used.
/// </summary>
[global::System.Text.Json.Serialization.JsonPropertyName("output_audio_tokens")]
public int? OutputAudioTokens { get; set; }

/// <summary>
/// The count of requests made to the model.
/// </summary>
Expand Down Expand Up @@ -83,13 +95,19 @@ public sealed partial class UsageCompletionsResult
/// </summary>
/// <param name="object"></param>
/// <param name="inputTokens">
/// The aggregated number of input tokens used. For customers subscribe to scale tier, this includes scale tier tokens.
/// The aggregated number of text input tokens used, including cached tokens. For customers subscribed to scale tier, this includes scale tier tokens.
/// </param>
/// <param name="inputCachedTokens">
/// The aggregated number of input tokens that has been cached from previous requests. For customers subscribe to scale tier, this includes scale tier tokens.
/// The aggregated number of text input tokens that have been cached from previous requests. For customers subscribed to scale tier, this includes scale tier tokens.
/// </param>
/// <param name="outputTokens">
/// The aggregated number of output tokens used. For customers subscribe to scale tier, this includes scale tier tokens.
/// The aggregated number of text output tokens used. For customers subscribed to scale tier, this includes scale tier tokens.
/// </param>
/// <param name="inputAudioTokens">
/// The aggregated number of audio input tokens used, including cached tokens.
/// </param>
/// <param name="outputAudioTokens">
/// The aggregated number of audio output tokens used.
/// </param>
/// <param name="numModelRequests">
/// The count of requests made to the model.
Expand All @@ -116,6 +134,8 @@ public UsageCompletionsResult(
int numModelRequests,
global::OpenAI.UsageCompletionsResultObject @object,
int? inputCachedTokens,
int? inputAudioTokens,
int? outputAudioTokens,
string? projectId,
string? userId,
string? apiKeyId,
Expand All @@ -127,6 +147,8 @@ public UsageCompletionsResult(
this.NumModelRequests = numModelRequests;
this.Object = @object;
this.InputCachedTokens = inputCachedTokens;
this.InputAudioTokens = inputAudioTokens;
this.OutputAudioTokens = outputAudioTokens;
this.ProjectId = projectId;
this.UserId = userId;
this.ApiKeyId = apiKeyId;
Expand Down
16 changes: 11 additions & 5 deletions src/libs/OpenAI/openapi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2551,7 +2551,7 @@ paths:
examples:
request:
curl: "curl \"https://api.openai.com/v1/organization/usage/completions?start_time=1730419200&limit=1\" \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\"\n"
response: "{\n \"object\": \"page\",\n \"data\": [\n {\n \"object\": \"bucket\",\n \"start_time\": 1730419200,\n \"end_time\": 1730505600,\n \"results\": [\n {\n \"object\": \"organization.usage.completions.result\",\n \"input_tokens\": 1000,\n \"output_tokens\": 500,\n \"input_cached_tokens\": 800,\n \"num_model_requests\": 5,\n \"project_id\": null,\n \"user_id\": null,\n \"api_key_id\": null,\n \"model\": null,\n \"batch\": null\n }\n ]\n }\n ],\n \"has_more\": true,\n \"next_page\": \"AAAAAGdGxdEiJdKOAAAAAGcqsYA=\"\n}\n"
response: "{\n \"object\": \"page\",\n \"data\": [\n {\n \"object\": \"bucket\",\n \"start_time\": 1730419200,\n \"end_time\": 1730505600,\n \"results\": [\n {\n \"object\": \"organization.usage.completions.result\",\n \"input_tokens\": 1000,\n \"output_tokens\": 500,\n \"input_cached_tokens\": 800,\n \"input_audio_tokens\": 0,\n \"output_audio_tokens\": 0,\n \"num_model_requests\": 5,\n \"project_id\": null,\n \"user_id\": null,\n \"api_key_id\": null,\n \"model\": null,\n \"batch\": null\n }\n ]\n }\n ],\n \"has_more\": true,\n \"next_page\": \"page_AAAAAGdGxdEiJdKOAAAAAGcqsYA=\"\n}\n"
/organization/usage/embeddings:
get:
tags:
Expand Down Expand Up @@ -13464,13 +13464,19 @@ components:
type: string
input_tokens:
type: integer
description: 'The aggregated number of input tokens used. For customers subscribe to scale tier, this includes scale tier tokens.'
description: 'The aggregated number of text input tokens used, including cached tokens. For customers subscribed to scale tier, this includes scale tier tokens.'
input_cached_tokens:
type: integer
description: 'The aggregated number of input tokens that has been cached from previous requests. For customers subscribe to scale tier, this includes scale tier tokens.'
description: 'The aggregated number of text input tokens that have been cached from previous requests. For customers subscribed to scale tier, this includes scale tier tokens.'
output_tokens:
type: integer
description: 'The aggregated number of output tokens used. For customers subscribe to scale tier, this includes scale tier tokens.'
description: 'The aggregated number of text output tokens used. For customers subscribed to scale tier, this includes scale tier tokens.'
input_audio_tokens:
type: integer
description: 'The aggregated number of audio input tokens used, including cached tokens.'
output_audio_tokens:
type: integer
description: The aggregated number of audio output tokens used.
num_model_requests:
type: integer
description: The count of requests made to the model.
Expand All @@ -13497,7 +13503,7 @@ components:
description: The aggregated completions usage details of the specific time bucket.
x-oaiMeta:
name: Completions usage object
example: "{\n \"object\": \"organization.usage.completions.result\",\n \"input_tokens\": 5000,\n \"output_tokens\": 1000,\n \"input_cached_tokens\": 4000,\n \"num_model_requests\": 5,\n \"project_id\": \"proj_abc\",\n \"user_id\": \"user-abc\",\n \"api_key_id\": \"key_abc\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"batch\": false\n}\n"
example: "{\n \"object\": \"organization.usage.completions.result\",\n \"input_tokens\": 5000,\n \"output_tokens\": 1000,\n \"input_cached_tokens\": 4000,\n \"input_audio_tokens\": 300,\n \"output_audio_tokens\": 200,\n \"num_model_requests\": 5,\n \"project_id\": \"proj_abc\",\n \"user_id\": \"user-abc\",\n \"api_key_id\": \"key_abc\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"batch\": false\n}\n"
UsageEmbeddingsResult:
required:
- object
Expand Down
Loading