Skip to content

Commit

Permalink
fix: Fixed issues with ReadOnly/WriteOnly schemas.
Browse files — browse the repository at this point in the history
HavenDV committed Sep 22, 2024
1 parent d1dc2ed commit b383731
Show file tree
Hide file tree
Showing 362 changed files with 2,231 additions and 2,448 deletions.
19 changes: 19 additions & 0 deletions src/libs/AutoSDK/Extensions/OpenApiExtensions.cs
Original file line number Diff line number Diff line change
Expand Up @@ -313,6 +313,25 @@ public static string GetSummary(this OpenApiSchema schema)

summary += $"Default Value: {@default}";
}

if (schema.ReadOnly)
{
if (!string.IsNullOrWhiteSpace(summary))
{
summary += "\n";
}

summary += "Included only in responses";
}
if (schema.WriteOnly)
{
if (!string.IsNullOrWhiteSpace(summary))
{
summary += "\n";
}

summary += "Included only in requests";
}

var example = schema.Example.GetString()?.ClearForXml();
if (!string.IsNullOrWhiteSpace(example))
Expand Down
14 changes: 12 additions & 2 deletions src/libs/AutoSDK/Models/EndPoint.cs
Original file line number Diff line number Diff line change
Expand Up @@ -91,14 +91,24 @@ public static EndPoint FromSchema(OperationContext operation)
requestContext?.ClassData?.Properties ??
[])
{
if (requestProperty.IsReadOnly)
{
continue;
}

parameters.Add(MethodParameter.Default with
{
Id = requestProperty.Id,
Name = parameters.All(x => x.Name != requestProperty.Name)
? requestProperty.Name
: $"request{requestProperty.Name.ToPropertyName()}",
Type = requestProperty.Type,
IsRequired = requestProperty.IsRequired,// is { IsRequired: true, IsReadOnly: false },
Type = requestProperty.Type with
{
CSharpTypeNullability =
requestProperty.Type.IsNullable ||
(requestProperty.Type.CSharpTypeNullability && !requestProperty.IsRequired && !requestProperty.IsWriteOnly),
},
IsRequired = requestProperty.IsRequired || requestProperty.IsWriteOnly,
IsMultiPartFormDataFilename = requestProperty.IsMultiPartFormDataFilename,
DefaultValue = requestProperty.DefaultValue,
IsDeprecated = requestProperty.IsDeprecated,
Expand Down
15 changes: 10 additions & 5 deletions src/libs/AutoSDK/Models/PropertyData.cs
Original file line number Diff line number Diff line change
Expand Up @@ -63,8 +63,8 @@ public static PropertyData FromSchemaContext(SchemaContext context)
: [];

var isRequired =
requiredProperties.Contains(propertyName);// &&
//context.Schema is { WriteOnly: false };
requiredProperties.Contains(propertyName) &&
context.Schema is { WriteOnly: false };
// Special case for enums with a single value.
if (isRequired && type is { IsEnum: true, EnumValues.Length: 1 })
{
Expand All @@ -74,14 +74,19 @@ public static PropertyData FromSchemaContext(SchemaContext context)
return new PropertyData(
Id: propertyName,
Name: name,
Type: type,
IsRequired: isRequired,
Type: type with
{
CSharpTypeNullability = type.CSharpTypeNullability || context.Schema is { WriteOnly: true },
},
IsRequired: isRequired && context.Schema is { ReadOnly: false },
IsReadOnly: context.Schema.ReadOnly,
IsWriteOnly: context.Schema.WriteOnly,
IsMultiPartFormDataFilename: false,
Settings: context.Settings,
IsDeprecated: context.Schema.Deprecated,
DefaultValue: context.GetDefaultValue(),
DefaultValue: context.Schema is { ReadOnly: true } && !type.CSharpTypeNullability
? "default!"
: context.GetDefaultValue(),
Summary: context.Schema.GetSummary(),
ConverterType: type.ConverterType);
}
Expand Down
3 changes: 3 additions & 0 deletions src/libs/AutoSDK/Models/TypeData.cs
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ public readonly record struct TypeData(
string CSharpTypeRaw,
bool CSharpTypeNullability,
bool IsArray,
bool IsNullable,
bool IsEnum,
bool IsBase64,
bool IsDate,
Expand All @@ -29,6 +30,7 @@ public readonly record struct TypeData(
CSharpTypeRaw: string.Empty,
CSharpTypeNullability: false,
IsArray: false,
IsNullable: false,
IsEnum: false,
IsBase64: false,
IsDate: false,
Expand Down Expand Up @@ -153,6 +155,7 @@ Default with
CSharpTypeRaw: type,
CSharpTypeNullability: GetCSharpNullability(context),
IsValueType: ContextIsValueType(context),
IsNullable: context.Schema.Nullable,
IsArray: context.Schema.IsArray(),
IsEnum: context.Schema.IsEnum(),
IsBase64: context.Schema.IsBase64(),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -191,24 +191,28 @@ partial void ProcessChatResponseContent(
/// </param>
/// <param name="maxTokens">
/// The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments<br/>
/// Included only in requests
/// </param>
/// <param name="maxInputTokens">
/// The maximum number of input tokens to send to the model. If not specified, `max_input_tokens` is the model's context length limit minus a small buffer.<br/>
/// Input will be truncated according to the `prompt_truncation` parameter.<br/>
/// Compatible Deployments: Cohere Platform
/// Compatible Deployments: Cohere Platform<br/>
/// Included only in requests
/// </param>
/// <param name="k">
/// Ensures only the top `k` most likely tokens are considered for generation at each step.<br/>
/// Defaults to `0`, min value of `0`, max value of `500`.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments<br/>
/// Default Value: 0
/// Default Value: 0<br/>
/// Included only in requests
/// </param>
/// <param name="p">
/// Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.<br/>
/// Defaults to `0.75`. min value of `0.01`, max value of `0.99`.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments<br/>
/// Default Value: 0.75
/// Default Value: 0.75<br/>
/// Included only in requests
/// </param>
/// <param name="seed">
/// If specified, the backend will make a best effort to sample tokens<br/>
Expand All @@ -219,17 +223,20 @@ partial void ProcessChatResponseContent(
/// </param>
/// <param name="stopSequences">
/// A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments<br/>
/// Included only in requests
/// </param>
/// <param name="frequencyPenalty">
/// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.<br/>
/// Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments<br/>
/// Included only in requests
/// </param>
/// <param name="presencePenalty">
/// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.<br/>
/// Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.<br/>
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments<br/>
/// Included only in requests
/// </param>
/// <param name="tools">
/// A list of available tools (functions) that the model may suggest invoking before producing a text response.<br/>
Expand Down Expand Up @@ -273,6 +280,13 @@ partial void ProcessChatResponseContent(
/// <exception cref="global::System.InvalidOperationException"></exception>
public async global::System.Threading.Tasks.Task<global::G.OneOf<global::G.NonStreamedChatResponse, global::G.StreamedChatResponse?>> ChatAsync(
string message,
int maxTokens,
int maxInputTokens,
int k,
double p,
global::System.Collections.Generic.IList<string> stopSequences,
double frequencyPenalty,
double presencePenalty,
string? xClientName = default,
string? model = default,
bool? stream = default,
Expand All @@ -285,14 +299,7 @@ partial void ProcessChatResponseContent(
global::System.Collections.Generic.IList<global::G.ChatDocument>? documents = default,
global::G.ChatRequestCitationQuality? citationQuality = default,
float? temperature = default,
int? maxTokens = default,
int? maxInputTokens = default,
int? k = 0,
double? p = 0.75,
int? seed = default,
global::System.Collections.Generic.IList<string>? stopSequences = default,
double? frequencyPenalty = default,
double? presencePenalty = default,
global::System.Collections.Generic.IList<global::G.Tool>? tools = default,
global::System.Collections.Generic.IList<global::G.ToolResult>? toolResults = default,
bool? forceSingleStep = default,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -116,34 +116,39 @@ partial void ProcessClassifyResponseContent(
/// <param name="inputs">
/// A list of up to 96 texts to be classified. Each one must be a non-empty string.<br/>
/// There is, however, no consistent, universal limit to the length a particular input can be. We perform classification on the first `x` tokens of each input, and `x` varies depending on which underlying model is powering classification. The maximum token length for each model is listed in the "max tokens" column [here](https://docs.cohere.com/docs/models).<br/>
/// Note: by default the `truncate` parameter is set to `END`, so tokens exceeding the limit will be automatically dropped. This behavior can be disabled by setting `truncate` to `NONE`, which will result in validation errors for longer texts.
/// Note: by default the `truncate` parameter is set to `END`, so tokens exceeding the limit will be automatically dropped. This behavior can be disabled by setting `truncate` to `NONE`, which will result in validation errors for longer texts.<br/>
/// Included only in requests
/// </param>
/// <param name="examples">
/// An array of examples to provide context to the model. Each example is a text string and its associated label/class. Each unique label requires at least 2 examples associated with it; the maximum number of examples is 2500, and each example has a maximum length of 512 tokens. The values should be structured as `{text: "...",label: "..."}`.<br/>
/// Note: [Fine-tuned Models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly.
/// Note: [Fine-tuned Models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly.<br/>
/// Included only in requests
/// </param>
/// <param name="model">
/// The identifier of the model. Currently available models are `embed-multilingual-v2.0`, `embed-english-light-v2.0`, and `embed-english-v2.0` (default). Smaller "light" models are faster, while larger models will perform better. [Fine-tuned models](https://docs.cohere.com/docs/fine-tuning) can also be supplied with their full ID.
/// The identifier of the model. Currently available models are `embed-multilingual-v2.0`, `embed-english-light-v2.0`, and `embed-english-v2.0` (default). Smaller "light" models are faster, while larger models will perform better. [Fine-tuned models](https://docs.cohere.com/docs/fine-tuning) can also be supplied with their full ID.<br/>
/// Included only in requests
/// </param>
/// <param name="preset">
/// The ID of a custom playground preset. You can create presets in the [playground](https://dashboard.cohere.com/playground/classify?model=large). If you use a preset, all other parameters become optional, and any included parameters will override the preset's parameters.<br/>
/// Included only in requests<br/>
/// Example: my-preset-a58sbd
/// </param>
/// <param name="truncate">
/// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.<br/>
/// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.<br/>
/// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.<br/>
/// Default Value: END
/// Default Value: END<br/>
/// Included only in requests
/// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
public async global::System.Threading.Tasks.Task<global::G.ClassifyResponse> ClassifyAsync(
global::System.Collections.Generic.IList<string> inputs,
global::System.Collections.Generic.IList<global::G.ClassifyExample> examples,
string model,
string preset,
global::G.ClassifyRequestTruncate truncate,
string? xClientName = default,
global::System.Collections.Generic.IList<global::G.ClassifyExample>? examples = default,
string? model = default,
string? preset = default,
global::G.ClassifyRequestTruncate? truncate = global::G.ClassifyRequestTruncate.END,
global::System.Threading.CancellationToken cancellationToken = default)
{
var request = new global::G.ClassifyRequest
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,8 @@ partial void ProcessDetokenizeResponseContent(
/// </summary>
/// <param name="xClientName"></param>
/// <param name="tokens">
/// The list of tokens to be detokenized.
/// The list of tokens to be detokenized.<br/>
/// Included only in requests
/// </param>
/// <param name="model">
/// An optional parameter to provide the model name. This will ensure that the detokenization is done by the tokenizer used by that model.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,8 @@ partial void ProcessEmbedResponseContent(
/// </summary>
/// <param name="xClientName"></param>
/// <param name="texts">
/// An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.
/// An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.<br/>
/// Included only in requests
/// </param>
/// <param name="model">
/// Defaults to embed-english-v2.0<br/>
Expand All @@ -128,7 +129,8 @@ partial void ProcessEmbedResponseContent(
/// * `embed-multilingual-light-v3.0` 384<br/>
/// * `embed-english-v2.0` 4096<br/>
/// * `embed-english-light-v2.0` 1024<br/>
/// * `embed-multilingual-v2.0` 768
/// * `embed-multilingual-v2.0` 768<br/>
/// Included only in requests
/// </param>
/// <param name="inputType">
/// Specifies the type of input passed to the model. Required for embedding models v3 and higher.<br/>
Expand All @@ -143,23 +145,25 @@ partial void ProcessEmbedResponseContent(
/// * `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models.<br/>
/// * `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models.<br/>
/// * `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models.<br/>
/// * `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.
/// * `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.<br/>
/// Included only in requests
/// </param>
/// <param name="truncate">
/// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.<br/>
/// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.<br/>
/// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.<br/>
/// Default Value: END
/// Default Value: END<br/>
/// Included only in requests
/// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
public async global::System.Threading.Tasks.Task<global::G.OneOf<global::G.EmbedFloatsResponse, global::G.EmbedByTypeResponse>> EmbedAsync(
global::System.Collections.Generic.IList<string> texts,
string model,
global::System.Collections.Generic.IList<global::G.EmbeddingType> embeddingTypes,
global::G.EmbedRequestTruncate truncate,
string? xClientName = default,
string? model = default,
global::G.EmbedInputType? inputType = default,
global::System.Collections.Generic.IList<global::G.EmbeddingType>? embeddingTypes = default,
global::G.EmbedRequestTruncate? truncate = global::G.EmbedRequestTruncate.END,
global::System.Threading.CancellationToken cancellationToken = default)
{
var request = new global::G.EmbedRequest
Expand Down
Loading

0 comments on commit b383731

Please sign in to comment.