
Update to 1.3 and fix minor documentation errors
OkGoDoIt committed Jan 12, 2023
1 parent f4d9d9d commit 0cfbd73
Showing 8 changed files with 49 additions and 39 deletions.
18 changes: 9 additions & 9 deletions OpenAI_API/Completions/CompletionEndpoint.cs
@@ -37,7 +37,7 @@ internal CompletionEndpoint(OpenAIAPI api)
/// Ask the API to complete the prompt(s) using the specified request. This is non-streaming, so it will wait until the API returns the full result.
/// </summary>
/// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
- /// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Choices"/> property for the completions.</returns>
+ /// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions.</returns>
public async Task<CompletionResult> CreateCompletionAsync(CompletionRequest request)
{
if (Api.Auth?.ApiKey is null)
@@ -83,7 +83,7 @@ public async Task<CompletionResult> CreateCompletionAsync(CompletionRequest requ
/// </summary>
/// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
/// <param name="numOutputs">Overrides <see cref="CompletionRequest.NumChoicesPerPrompt"/> as a convenience.</param>
- /// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Choices"/> property for the completions, which should have a length equal to <paramref name="numOutputs"/>.</returns>
+ /// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions, which should have a length equal to <paramref name="numOutputs"/>.</returns>
public Task<CompletionResult> CreateCompletionsAsync(CompletionRequest request, int numOutputs = 5)
{
request.NumChoicesPerPrompt = numOutputs;
@@ -94,17 +94,17 @@ public Task<CompletionResult> CreateCompletionsAsync(CompletionRequest request,
/// Ask the API to complete the prompt(s) using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/> if present.
/// </summary>
/// <param name="prompt">The prompt to generate from</param>
/// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
/// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync()"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
/// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
/// <param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
/// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
/// <param name="numOutputs">How many different choices to request for each prompt.</param>
/// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
/// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Choices"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Completions"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
/// <param name="echo">Echo back the prompt in addition to the completion.</param>
/// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
- /// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Choices"/> property for the completions.</returns>
+ /// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions.</returns>
public Task<CompletionResult> CreateCompletionAsync(string prompt,
Model model = null,
int? max_tokens = null,
@@ -232,11 +232,11 @@ public async Task StreamCompletionAsync(CompletionRequest request, Action<Comple
}

/// <summary>
- /// Ask the API to complete the prompt(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
+ /// Ask the API to complete the prompt(s) using the specified request, and stream the results as they come in.
/// If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="StreamCompletionAsync(CompletionRequest, Action{CompletionResult})"/> instead.
/// </summary>
/// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
- /// <returns>An async enumerable with each of the results as they come in. See <seealso cref="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams"/> for more details on how to consume an async enumerable.</returns>
+ /// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams"/> for more details on how to consume an async enumerable.</returns>
public async IAsyncEnumerable<CompletionResult> StreamCompletionEnumerableAsync(CompletionRequest request)
{
if (Api.Auth?.ApiKey is null)
@@ -293,14 +293,14 @@ public async IAsyncEnumerable<CompletionResult> StreamCompletionEnumerableAsync(
/// If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="StreamCompletionAsync(CompletionRequest, Action{CompletionResult})"/> instead.
/// </summary>
/// <param name="prompt">The prompt to generate from</param>
/// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
/// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync()"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
/// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
/// <param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
/// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
/// <param name="numOutputs">How many different choices to request for each prompt.</param>
/// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
/// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Choices"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Completions"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
/// <param name="echo">Echo back the prompt in addition to the completion.</param>
/// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
/// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams">the C# docs</see> for more details on how to consume an async enumerable.</returns>
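For context, a minimal usage sketch of the completion endpoint whose documentation is corrected above. Only the method and parameter names shown in this diff come from the source; the `OpenAIAPI` entry point, its `Completions` property, and `Choice.Text` are assumptions made purely for illustration.

```csharp
using System;
using System.Threading.Tasks;
using OpenAI_API;

public static class CompletionExamples
{
    public static async Task RunAsync()
    {
        // Assumed entry point constructed from an API key; the endpoint documented above
        // is assumed to be exposed as api.Completions.
        var api = new OpenAIAPI("sk-...");

        // Non-streaming: waits for the full result. The corrected docs point to
        // CompletionResult.Completions for the generated choices.
        CompletionResult result = await api.Completions.CreateCompletionAsync(
            "Once upon a time",
            model: Model.DavinciText,
            max_tokens: 50,
            temperature: 0.2);

        foreach (var choice in result.Completions)
        {
            Console.WriteLine(choice.Text); // Choice.Text assumed for illustration
        }

        // Streaming: StreamCompletionEnumerableAsync returns an IAsyncEnumerable<CompletionResult>,
        // so partial results can be consumed with await foreach (C# 8 async streams).
        var request = new CompletionRequest("Once upon a time", model: Model.DavinciText, max_tokens: 50);
        await foreach (var partial in api.Completions.StreamCompletionEnumerableAsync(request))
        {
            foreach (var choice in partial.Completions)
            {
                Console.Write(choice.Text);
            }
        }
    }
}
```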
8 changes: 4 additions & 4 deletions OpenAI_API/Completions/CompletionRequest.cs
@@ -15,7 +15,7 @@ namespace OpenAI_API
public class CompletionRequest
{
/// <summary>
- /// ID of the model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.
+ /// ID of the model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync()"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.
/// </summary>
[JsonProperty("model")]
public string Model { get; set; }
@@ -103,7 +103,7 @@ public string Prompt
public bool Stream { get; internal set; } = false;

/// <summary>
- /// Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Choices"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 5, the API will return a list of the 5 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response. The maximum value for logprobs is 5.
+ /// Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Completions"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 5, the API will return a list of the 5 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response. The maximum value for logprobs is 5.
/// </summary>
[JsonProperty("logprobs")]
public int? Logprobs { get; set; }
@@ -209,15 +209,15 @@ public CompletionRequest(params string[] prompts)
/// Creates a new <see cref="CompletionRequest"/> with the specified parameters
/// </summary>
/// <param name="prompt">The prompt to generate from</param>
/// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
/// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync()"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
/// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
/// <param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
/// <param name="suffix">The suffix that comes after a completion of inserted text</param>
/// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
/// <param name="numOutputs">How many different choices to request for each prompt.</param>
/// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
/// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Choices"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Completions"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
/// <param name="echo">Echo back the prompt in addition to the completion.</param>
/// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
public CompletionRequest(
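A short sketch of building a `CompletionRequest` with the constructor parameters documented above. The parameter names come from this diff; the prompt text, values, and the surrounding setup are illustrative only.

```csharp
using OpenAI_API;

// Illustrative only: the parameter names follow the constructor documented above.
var request = new CompletionRequest(
    prompt: "Translate 'hello' into French:",
    model: Model.DavinciText,
    max_tokens: 16,
    temperature: 0.0,                  // argmax sampling for a well-defined answer
    logProbs: 5,                       // per the Logprobs docs, 5 is the maximum allowed value
    echo: false,
    stopSequences: new[] { "\n" });    // generation stops before any stop sequence

// The request can then be passed to CompletionEndpoint.CreateCompletionAsync(request),
// which, per the docs above, does not fall back to DefaultCompletionRequestArgs.
```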
2 changes: 0 additions & 2 deletions OpenAI_API/Model/Model.cs
@@ -46,8 +46,6 @@ public static implicit operator Model(string name)
/// Represents an Model with the given id/<see cref="ModelID"/>
/// </summary>
/// <param name="name">The id/<see cref="ModelID"/> to use.
- /// If the <paramref name="name"/> contains a colon (as is the case in the API's <see cref="CompletionResult.Model"/> response),
- /// the part before the colon is treated as the id/<see cref="ModelID"/> and the following portion is considered the <see cref="ModelRevision"/>
/// </param>
public Model(string name)
{
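For reference, the implicit string conversion and the `Model(string name)` constructor touched by this change can be used as follows (a sketch; the model id is illustrative):

```csharp
using OpenAI_API;

// Implicit conversion from string, per `public static implicit operator Model(string name)` above.
Model viaConversion = "text-davinci-003";

// Equivalent explicit construction via the Model(string name) constructor; after this commit the
// name is simply the model id, with no colon-splitting into a separate revision.
var viaConstructor = new Model("text-davinci-003");
```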
16 changes: 8 additions & 8 deletions OpenAI_API/Model/ModelsEndpoint.cs
@@ -36,7 +36,7 @@ public Task<List<Model>> GetModelsAsync()
}

/// <summary>
- /// Get details about a particular Model from the API, specifically properties such as <see cref="Model.Owner"/> and <see cref="Model.Ready"/>
+ /// Get details about a particular Model from the API, specifically properties such as <see cref="Model.OwnedBy"/> and permissions.
/// </summary>
/// <param name="id">The id/name of the model to get more details about</param>
/// <returns>Asynchronously returns the <see cref="Model"/> with all available properties</returns>
@@ -75,13 +75,13 @@ public static async Task<List<Model>> GetModelsAsync(APIAuthentication auth = nu
}
}

- /// <summary>
- /// Get details about a particular Model from the API, specifically properties such as <see cref="Model.Owner"/> and <see cref="Model.Ready"/>
- /// </summary>
- /// <param name="id">The id/name of the model to get more details about</param>
- /// <param name="auth">API authentication in order to call the API endpoint. If not specified, attempts to use a default.</param>
- /// <returns>Asynchronously returns the <see cref="Model"/> with all available properties</returns>
- public static async Task<Model> RetrieveModelDetailsAsync(string id, APIAuthentication auth = null)
+ /// <summary>
+ /// Get details about a particular Model from the API, specifically properties such as <see cref="Model.OwnedBy"/> and permissions.
+ /// </summary>
+ /// <param name="id">The id/name of the model to get more details about</param>
+ /// <param name="auth">API authentication in order to call the API endpoint. If not specified, attempts to use a default.</param>
+ /// <returns>Asynchronously returns the <see cref="Model"/> with all available properties</returns>
+ public static async Task<Model> RetrieveModelDetailsAsync(string id, APIAuthentication auth = null)
{
if (auth.ThisOrDefault()?.ApiKey is null)
{
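Finally, a sketch of the models endpoint whose documentation is corrected above. `GetModelsAsync()`, the static `GetModelsAsync(APIAuthentication)` overload, `RetrieveModelDetailsAsync`, `Model.ModelID`, `Model.OwnedBy`, and `APIAuthentication` appear in this diff; the `api.Models` property, the key-based constructors, and the instance form of `RetrieveModelDetailsAsync` are assumptions for illustration.

```csharp
using System;
using System.Threading.Tasks;
using OpenAI_API;

public static class ModelsExamples
{
    public static async Task RunAsync()
    {
        var api = new OpenAIAPI("sk-...");            // assumed entry point

        // List every model available to this API key.
        var models = await api.Models.GetModelsAsync();
        foreach (var model in models)
        {
            Console.WriteLine(model.ModelID);
        }

        // Details for a single model; OwnedBy is the property the corrected docs reference.
        var davinci = await api.Models.RetrieveModelDetailsAsync("text-davinci-003");
        Console.WriteLine(davinci.OwnedBy);

        // The static overload documented above accepts explicit credentials instead.
        var viaStatic = await ModelsEndpoint.GetModelsAsync(new APIAuthentication("sk-..."));
        Console.WriteLine(viaStatic.Count);
    }
}
```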
