Skip to content

Commit

Permalink
fix: Fixed issue with OpenAI spec.
Browse files Browse the repository at this point in the history
  • Loading branch information
HavenDV committed Dec 4, 2024
1 parent bf5c9c3 commit 251fc04
Show file tree
Hide file tree
Showing 1,297 changed files with 133,591 additions and 36,586 deletions.
36,091 changes: 24,842 additions & 11,249 deletions specs/openai.yaml

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions src/libs/AutoSDK/Extensions/StringExtensions.cs
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,8 @@ public static string ToParameterName(this string input)
"" => string.Empty,
"Event" => "@event",
"event" => "@event",
"Object" => "@object",
"object" => "@object",
"Namespace" => "@namespace",
"namespace" => "@namespace",
#pragma warning disable CA1308 // Normalize strings to uppercase
Expand Down
7 changes: 6 additions & 1 deletion src/libs/AutoSDK/Sources/Sources.Models.AnyOf.cs
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,11 @@ public static string GenerateAnyOf(
{x.Name} = {x.ParameterName};
").Inject()}
}}" : " ";
var objectProperty =
anyOfData.Properties.Any(x => x.ParameterName == "object") ||
anyOfData.DiscriminatorPropertyName == "Object"
? "Object1"
: "Object";

return $@"{(anyOfData.IsNamed ? @"#pragma warning disable CS0618 // Type or member is obsolete
" : "")}
Expand Down Expand Up @@ -102,7 +107,7 @@ namespace {anyOfData.Namespace}
{constructorWithAllValues}
{string.Empty.ToXmlDocumentationSummary(level: 8)}
public object? Object =>
public object? {objectProperty} =>
{anyOfData.Properties.Reverse().Select(x => $@"
{x.Name} as object ??
").Inject().TrimEnd('?', '\n')}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ partial void ProcessCreateAssistantResponseContent(
/// Create an assistant with a model and instructions.
/// </summary>
/// <param name="model">
/// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.<br/>
/// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.<br/>
/// Example: gpt-4o
/// </param>
/// <param name="name">
Expand All @@ -175,28 +175,30 @@ partial void ProcessCreateAssistantResponseContent(
/// The system instructions that the assistant uses. The maximum length is 256,000 characters.
/// </param>
/// <param name="tools">
/// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
/// A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.<br/>
/// Default Value: []
/// </param>
/// <param name="toolResources">
/// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
/// </param>
/// <param name="metadata">
/// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.
/// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
/// </param>
/// <param name="temperature">
/// empty<br/>
/// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.<br/>
/// Default Value: 1<br/>
/// Example: 1
/// </param>
/// <param name="topP">
/// empty<br/>
/// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.<br/>
/// We generally recommend altering this or temperature but not both.<br/>
/// Default Value: 1<br/>
/// Example: 1
/// </param>
/// <param name="responseFormat">
/// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.<br/>
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).<br/>
/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.<br/>
/// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.<br/>
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).<br/>
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.<br/>
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
/// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ partial void ProcessCreateMessageResponseContent(
/// A list of files attached to the message, and the tools they should be added to.
/// </param>
/// <param name="metadata">
/// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.
/// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
/// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,13 @@ public partial class AssistantsClient
partial void PrepareCreateRunArguments(
global::System.Net.Http.HttpClient httpClient,
ref string threadId,
global::System.Collections.Generic.IList<global::G.CreateRunIncludeItem>? include,
global::G.CreateRunRequest request);
partial void PrepareCreateRunRequest(
global::System.Net.Http.HttpClient httpClient,
global::System.Net.Http.HttpRequestMessage httpRequestMessage,
string threadId,
global::System.Collections.Generic.IList<global::G.CreateRunIncludeItem>? include,
global::G.CreateRunRequest request);
partial void ProcessCreateRunResponse(
global::System.Net.Http.HttpClient httpClient,
Expand All @@ -28,12 +30,14 @@ partial void ProcessCreateRunResponseContent(
/// Create a run.
/// </summary>
/// <param name="threadId"></param>
/// <param name="include"></param>
/// <param name="request"></param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::G.ApiException"></exception>
public async global::System.Threading.Tasks.Task<global::G.RunObject> CreateRunAsync(
string threadId,
global::G.CreateRunRequest request,
global::System.Collections.Generic.IList<global::G.CreateRunIncludeItem>? include = default,
global::System.Threading.CancellationToken cancellationToken = default)
{
request = request ?? throw new global::System.ArgumentNullException(nameof(request));
Expand All @@ -43,6 +47,7 @@ partial void ProcessCreateRunResponseContent(
PrepareCreateRunArguments(
httpClient: HttpClient,
threadId: ref threadId,
include: include,
request: request);

var __pathBuilder = new PathBuilder(
Expand Down Expand Up @@ -86,6 +91,7 @@ partial void ProcessCreateRunResponseContent(
httpClient: HttpClient,
httpRequestMessage: __httpRequest,
threadId: threadId,
include: include,
request: request);

using var __response = await HttpClient.SendAsync(
Expand Down Expand Up @@ -168,6 +174,7 @@ partial void ProcessCreateRunResponseContent(
/// Create a run.
/// </summary>
/// <param name="threadId"></param>
/// <param name="include"></param>
/// <param name="assistantId">
/// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.
/// </param>
Expand All @@ -188,15 +195,16 @@ partial void ProcessCreateRunResponseContent(
/// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.
/// </param>
/// <param name="metadata">
/// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.
/// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
/// </param>
/// <param name="temperature">
/// empty<br/>
/// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.<br/>
/// Default Value: 1<br/>
/// Example: 1
/// </param>
/// <param name="topP">
/// empty<br/>
/// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.<br/>
/// We generally recommend altering this or temperature but not both.<br/>
/// Default Value: 1<br/>
/// Example: 1
/// </param>
Expand All @@ -220,24 +228,26 @@ partial void ProcessCreateRunResponseContent(
/// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.
/// </param>
/// <param name="parallelToolCalls">
/// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use.
/// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.<br/>
/// Default Value: true
/// </param>
/// <param name="responseFormat">
/// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.<br/>
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).<br/>
/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.<br/>
/// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.<br/>
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).<br/>
/// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.<br/>
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
/// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
public async global::System.Threading.Tasks.Task<global::G.RunObject> CreateRunAsync(
string threadId,
string assistantId,
global::System.Collections.Generic.IList<global::G.CreateRunIncludeItem>? include = default,
global::G.AnyOf<string, global::G.CreateRunRequestModel?>? model = default,
string? instructions = default,
string? additionalInstructions = default,
global::System.Collections.Generic.IList<global::G.CreateMessageRequest>? additionalMessages = default,
global::System.Collections.Generic.IList<global::G.ToolsItem5>? tools = default,
global::System.Collections.Generic.IList<global::G.ToolsItem4>? tools = default,
object? metadata = default,
double? temperature = default,
double? topP = default,
Expand Down Expand Up @@ -272,6 +282,7 @@ partial void ProcessCreateRunResponseContent(

return await CreateRunAsync(
threadId: threadId,
include: include,
request: __request,
cancellationToken: cancellationToken).ConfigureAwait(false);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,7 @@ partial void ProcessCreateThreadResponseContent(
/// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
/// </param>
/// <param name="metadata">
/// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.
/// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
/// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
Expand Down
Loading

0 comments on commit 251fc04

Please sign in to comment.