Skip to content

Commit

Permalink
feat: Updated OpenAPI spec
Browse files Browse the repository at this point in the history
  • Loading branch information
github-actions[bot] committed Sep 11, 2024
1 parent e1f1b4c commit ff9b8de
Show file tree
Hide file tree
Showing 3 changed files with 5 additions and 8 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -146,8 +146,7 @@ partial void ProcessOpenaiChatCompletionsV1OpenaiChatCompletionsPostResponseCont
/// </param>
/// <param name="maxTokens">
/// The maximum number of tokens to generate in the chat completion.<br/>
/// The total length of input tokens and generated tokens is limited by the model's context length. If explicitly set to None it will be the model's max context length minus input length.<br/>
/// Default Value: 512
/// The total length of input tokens and generated tokens is limited by the model's context length.
/// </param>
/// <param name="stop">
/// Up to 16 sequences where the API will stop generating further tokens.
Expand Down Expand Up @@ -189,7 +188,7 @@ partial void ProcessOpenaiChatCompletionsV1OpenaiChatCompletionsPostResponseCont
double temperature = 1,
double topP = 1,
int topK = 0,
int maxTokens = 512,
int maxTokens = default,
global::System.AnyOf<string, global::System.Collections.Generic.IList<string>>? stop = default,
int n = 1,
double presencePenalty = 0,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,11 +54,10 @@ public sealed partial class OpenAIChatCompletionsIn

/// <summary>
/// The maximum number of tokens to generate in the chat completion.<br/>
/// The total length of input tokens and generated tokens is limited by the model's context length. If explicitly set to None it will be the model's max context length minus input length.<br/>
/// Default Value: 512
/// The total length of input tokens and generated tokens is limited by the model's context length.
/// </summary>
[global::System.Text.Json.Serialization.JsonPropertyName("max_tokens")]
public int MaxTokens { get; set; } = 512;
public int MaxTokens { get; set; }

/// <summary>
/// Up to 16 sequences where the API will stop generating further tokens.
Expand Down
3 changes: 1 addition & 2 deletions src/libs/DeepInfra/openapi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3633,8 +3633,7 @@ components:
maximum: 1000000.0
minimum: 0.0
type: integer
description: "The maximum number of tokens to generate in the chat completion.\n\nThe total length of input tokens and generated tokens is limited by the model's context length. If explicitly set to None it will be the model's max context length minus input length."
default: 512
description: "The maximum number of tokens to generate in the chat completion.\n\nThe total length of input tokens and generated tokens is limited by the model's context length."
stop:
title: Stop
anyOf:
Expand Down

0 comments on commit ff9b8de

Please sign in to comment.