fix: Fixed missing xml docs for clients. #62
main.yml
on: push
Build, test and publish / Build, test and publish
3m 21s
Annotations
2 errors
OpenAi
Test method OpenApiGenerator.UnitTests.ClientTests.OpenAi threw exception:
VerifyException: Directory: /home/runner/work/OpenApiGenerator/OpenApiGenerator/src/tests/OpenApiGenerator.UnitTests/Snapshots/Clients/OpenAi
NotEqual:
  - Received: _.received.txt
    Verified: _.verified.txt
FileContent:
NotEqual:
Received: _.received.txt
[
  {
    Id: CreateChatCompletion,
    Namespace: G,
    ClassName: ChatClient,
    BaseUrl: ,
    Stream: false,
    Path: "/chat/completions",
    AuthorizationScheme: ,
    Properties: [
      {
        Id: messages,
        Name: Messages,
        Type: {
          CSharpType: global::System.Collections.Generic.IList<ChatCompletionRequestMessage>,
          IsArray: true,
          IsEnum: false,
          Properties: null,
          EnumValues: null
        },
        IsRequired: true,
        Summary: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).,
        ParameterName: messages,
        ArgumentName: messages,
        ParameterDefaultValue: default
      },
      {
        Id: model,
        Name: Model,
        Type: {
          CSharpType: object,
          IsArray: false,
          IsEnum: false,
          Properties: null,
          EnumValues: null
        },
        IsRequired: true,
        Summary:
          ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
          <br/>Example: gpt-4-turbo,
        ParameterName: model,
        ArgumentName: model,
        ParameterDefaultValue: default
      },
      {
        Id: frequency_penalty,
        Name: FrequencyPenalty,
        Type: {
          CSharpType: double?,
          IsArray: false,
          IsEnum: false,
          Properties: null,
          EnumValues: null
        },
        IsRequired: false,
        DefaultValue: 0,
        Summary:
          Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
          [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
          <br/>Default Value: 0,
        ParameterName: frequencyPenalty,
        ArgumentName: frequencyPenalty,
        ParameterDefaultValue: 0
      },
      {
        Id: logit_bias,
        Name: LogitBias,
        Type: {
          CSharpType: object?,
          IsArray: false,
          IsEnum: false,
          Properties: null,
          EnumValues: null
        },
        IsRequired: false,
        DefaultValue: ,
        Summary:
          Modify the likelihood of specified tokens appearing in the completion.
          Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
          <br/>Default Value: ,
        ParameterName: logitBias,
        ArgumentName: logitBias,
        ParameterDefaultValue: default
      },
      {
        Id: logprobs,
        Name: Logprobs,
        Type: {
          CSharpType: bool?,
          IsArray: false,
          IsEnum: false,
          Properties: null,
          EnumValues: null
        },
        IsRequired: false,
        DefaultValue: false,
        Summary:
          Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.
          <br/>Default Value: false,
        ParameterName: logprobs,
        ArgumentName: logprobs,
        ParameterDefaultValue: false
      },
      {
        Id: top_logprobs,
        Name: TopLogprobs,
        Type: {
          CSharpType: int?,
          IsArray: false,
          IsEnum:
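For orientation, the received content above is the serialized client model that the snapshot test compares. A rough sketch of record shapes that would serialize to these fields (the names are inferred from the snapshot, not the repository's actual types):

using System.Collections.Generic;

// Hypothetical shapes inferred from the snapshot fields above; the real
// OpenApiGenerator models may differ in names and structure.
public sealed record TypeData(
    string CSharpType,                       // e.g. "double?" or an IList<> of a generated model
    bool IsArray,
    bool IsEnum,
    IReadOnlyList<PropertyData>? Properties,
    IReadOnlyList<string>? EnumValues);

public sealed record PropertyData(
    string Id,                               // raw OpenAPI name, e.g. "frequency_penalty"
    string Name,                             // PascalCase name, e.g. "FrequencyPenalty"
    TypeData Type,
    bool IsRequired,
    string DefaultValue,
    string Summary,                          // text that feeds the generated XML docs
    string ParameterName,                    // camelCase, e.g. "frequencyPenalty"
    string ArgumentName,
    string ParameterDefaultValue);

public sealed record EndPoint(
    string Id,                               // operationId, e.g. "CreateChatCompletion"
    string Namespace,
    string ClassName,                        // e.g. "ChatClient"
    string BaseUrl,
    bool Stream,
    string Path,                             // e.g. "/chat/completions"
    string AuthorizationScheme,
    IReadOnlyList<PropertyData> Properties);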
Build, test and publish / Build, test and publish
Process completed with exit code 1.
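The OpenAi failure above is a Verify snapshot mismatch: the test serializes the generated client model, writes it as _.received.txt, and compares it with the committed _.verified.txt; after the XML-docs change the output differs, so the job fails until the verified snapshot is updated. A minimal sketch of such a test, here with VerifyXunit and a stand-in object (the repository's actual test setup may differ):

using System.Threading.Tasks;
using VerifyXunit;
using Xunit;

public class ClientTests
{
    [Fact]
    public Task OpenAi()
    {
        // Stand-in for the generator's output; the real test would serialize the
        // client model built from the OpenAI spec.
        var model = new
        {
            Id = "CreateChatCompletion",
            ClassName = "ChatClient",
            Path = "/chat/completions",
        };

        // Verify writes *.received.txt and compares it with the committed
        // *.verified.txt in this directory; any difference throws VerifyException.
        return Verifier.Verify(model)
            .UseDirectory("Snapshots/Clients/OpenAi");
    }
}

Once the new output has been reviewed, accepting the snapshot (copying the received file over the verified one, or using Verify's accept tooling) clears the failure on the next run.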
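Since the commit is about missing XML docs for clients, the Summary values in the model are what end up in the generated doc comments. A hypothetical illustration of how that could surface on the generated client (only ChatClient, CreateChatCompletion, and the Summary text come from the snapshot; everything else is an assumption):

using System.Collections.Generic;
using System.Threading.Tasks;

// Placeholder for the generated request-message model referenced in the snapshot.
public sealed record ChatCompletionRequestMessage;

// Hypothetical sketch of generated output; the repository's actual generated
// client will differ in shape and implementation.
public class ChatClient
{
    /// <param name="messages">
    /// A list of messages comprising the conversation so far.
    /// </param>
    /// <param name="model">
    /// ID of the model to use.
    /// <br/>Example: gpt-4-turbo
    /// </param>
    /// <param name="frequencyPenalty">
    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on
    /// their existing frequency in the text so far.
    /// <br/>Default Value: 0
    /// </param>
    public Task CreateChatCompletionAsync(
        IList<ChatCompletionRequestMessage> messages,
        object model,
        double? frequencyPenalty = 0)
    {
        // Placeholder body; the real generated client sends the HTTP request here.
        return Task.CompletedTask;
    }
}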