diff --git a/docs/openapi.yaml b/docs/openapi.yaml index 56f2b9d..93b1bc2 100644 --- a/docs/openapi.yaml +++ b/docs/openapi.yaml @@ -338,90 +338,106 @@ components: type: integer nullable: true description: | - Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + Sets the random number seed to use for generation. Setting this to a specific number will make the model + generate the same text for the same prompt. (Default: 0) num_predict: type: integer nullable: true description: | - Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + Maximum number of tokens to predict when generating text. + (Default: 128, -1 = infinite generation, -2 = fill context) top_k: type: integer nullable: true description: | - Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, + while a lower value (e.g. 10) will be more conservative. (Default: 40) top_p: type: number format: float nullable: true description: | - Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value + (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) tfs_z: type: number format: float nullable: true description: | - Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value + (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) typical_p: type: number format: float nullable: true description: | - Typical p is used to reduce the impact of less probable tokens from the output. + Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) repeat_last_n: type: integer nullable: true description: | - Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + Sets how far back for the model to look back to prevent repetition. + (Default: 64, 0 = disabled, -1 = num_ctx) temperature: type: number format: float nullable: true description: | - The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + The temperature of the model. Increasing the temperature will make the model answer more creatively. + (Default: 0.8) repeat_penalty: type: number format: float nullable: true description: | - Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more + strongly, while a lower value (e.g., 0.9) will be more lenient. 
(Default: 1.1) presence_penalty: type: number format: float nullable: true description: | - Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + Positive values penalize new tokens based on whether they appear in the text so far, increasing the + model's likelihood to talk about new topics. (Default: 0) frequency_penalty: type: number format: float nullable: true description: | - Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the + model's likelihood to repeat the same line verbatim. (Default: 0) mirostat: type: integer nullable: true description: | - Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + Enable Mirostat sampling for controlling perplexity. + (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) mirostat_tau: type: number format: float nullable: true description: | - Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + Controls the balance between coherence and diversity of the output. A lower value will result in more + focused and coherent text. (Default: 5.0) mirostat_eta: type: number format: float nullable: true description: | - Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate + will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. + (Default: 0.1) penalize_newline: type: boolean nullable: true description: | - Penalize newlines in the output. (Default: false) + Penalize newlines in the output. (Default: true) stop: type: array nullable: true - description: Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + description: | + Sequences where the API will stop generating further tokens. The returned text will not contain the stop + sequence. items: type: string numa: @@ -433,17 +449,18 @@ components: type: integer nullable: true description: | - Sets the size of the context window used to generate the next token. + Sets the size of the context window used to generate the next token. (Default: 2048) num_batch: type: integer nullable: true description: | - Sets the number of batches to use for generation. (Default: 1) + Sets the number of batches to use for generation. (Default: 512) num_gpu: type: integer nullable: true description: | - The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + The number of layers to send to the GPU(s). + On macOS it defaults to 1 to enable metal support, 0 to disable. main_gpu: type: integer nullable: true @@ -483,7 +500,9 @@ components: type: integer nullable: true description: | - Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). 
+ Sets the number of threads to use during computation. By default, Ollama will detect this for optimal + performance. It is recommended to set this value to the number of physical CPU cores your system has + (as opposed to the logical number of cores). ResponseFormat: type: string description: | diff --git a/src/libs/Directory.Build.props b/src/libs/Directory.Build.props index ff8b61a..19c3e01 100644 --- a/src/libs/Directory.Build.props +++ b/src/libs/Directory.Build.props @@ -14,7 +14,7 @@ - 1.4.1 + 1.4.2 true true tryAGI and contributors diff --git a/src/libs/Ollama/Generated/JsonSerializerContext.g.cs b/src/libs/Ollama/Generated/JsonSerializerContext.g.cs index b5116b9..a2c951f 100644 --- a/src/libs/Ollama/Generated/JsonSerializerContext.g.cs +++ b/src/libs/Ollama/Generated/JsonSerializerContext.g.cs @@ -2,6 +2,7 @@ #nullable enable #pragma warning disable CS0618 // Type or member is obsolete +#pragma warning disable CS3016 // Arrays as attribute arguments is not CLS-compliant namespace Ollama { diff --git a/src/libs/Ollama/Generated/Ollama.ChatClient.GenerateChatCompletion.g.cs b/src/libs/Ollama/Generated/Ollama.ChatClient.GenerateChatCompletion.g.cs index 027b393..c5dd11e 100644 --- a/src/libs/Ollama/Generated/Ollama.ChatClient.GenerateChatCompletion.g.cs +++ b/src/libs/Ollama/Generated/Ollama.ChatClient.GenerateChatCompletion.g.cs @@ -5,6 +5,17 @@ namespace Ollama { public partial class ChatClient { + partial void PrepareGenerateChatCompletionArguments( + global::System.Net.Http.HttpClient httpClient, + global::Ollama.GenerateChatCompletionRequest request); + partial void PrepareGenerateChatCompletionRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + global::Ollama.GenerateChatCompletionRequest request); + partial void ProcessGenerateChatCompletionResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + /// /// Generate the next message in a chat with a provided model.
/// This is a streaming endpoint, so there will be a series of responses. The final response object will include statistics and additional data from the request. @@ -18,6 +29,12 @@ public partial class ChatClient { request = request ?? throw new global::System.ArgumentNullException(nameof(request)); + PrepareArguments( + client: _httpClient); + PrepareGenerateChatCompletionArguments( + httpClient: _httpClient, + request: request); + using var httpRequest = new global::System.Net.Http.HttpRequestMessage( method: global::System.Net.Http.HttpMethod.Post, requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + "/chat", global::System.UriKind.RelativeOrAbsolute)); @@ -27,10 +44,25 @@ public partial class ChatClient encoding: global::System.Text.Encoding.UTF8, mediaType: "application/json"); + PrepareRequest( + client: _httpClient, + request: httpRequest); + PrepareGenerateChatCompletionRequest( + httpClient: _httpClient, + httpRequestMessage: httpRequest, + request: request); + using var response = await _httpClient.SendAsync( request: httpRequest, completionOption: global::System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: _httpClient, + response: response); + ProcessGenerateChatCompletionResponse( + httpClient: _httpClient, + httpResponseMessage: response); response.EnsureSuccessStatusCode(); using var stream = await response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); diff --git a/src/libs/Ollama/Generated/Ollama.ChatClient.g.cs b/src/libs/Ollama/Generated/Ollama.ChatClient.g.cs index db65488..88f1f1c 100644 --- a/src/libs/Ollama/Generated/Ollama.ChatClient.g.cs +++ b/src/libs/Ollama/Generated/Ollama.ChatClient.g.cs @@ -39,5 +39,18 @@ public void Dispose() { _httpClient.Dispose(); } + + partial void PrepareArguments( + global::System.Net.Http.HttpClient client); + partial void PrepareRequest( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpRequestMessage request); + partial void ProcessResponse( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response); + partial void ProcessResponseContent( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response, + ref string content); } } \ No newline at end of file diff --git a/src/libs/Ollama/Generated/Ollama.CompletionsClient.GenerateCompletion.g.cs b/src/libs/Ollama/Generated/Ollama.CompletionsClient.GenerateCompletion.g.cs index 2f94d92..a3c6a5c 100644 --- a/src/libs/Ollama/Generated/Ollama.CompletionsClient.GenerateCompletion.g.cs +++ b/src/libs/Ollama/Generated/Ollama.CompletionsClient.GenerateCompletion.g.cs @@ -5,6 +5,17 @@ namespace Ollama { public partial class CompletionsClient { + partial void PrepareGenerateCompletionArguments( + global::System.Net.Http.HttpClient httpClient, + global::Ollama.GenerateCompletionRequest request); + partial void PrepareGenerateCompletionRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + global::Ollama.GenerateCompletionRequest request); + partial void ProcessGenerateCompletionResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + /// /// Generate a response for a given prompt with a provided model.
/// The final response object will include statistics and additional data from the request. @@ -18,6 +29,12 @@ public partial class CompletionsClient { request = request ?? throw new global::System.ArgumentNullException(nameof(request)); + PrepareArguments( + client: _httpClient); + PrepareGenerateCompletionArguments( + httpClient: _httpClient, + request: request); + using var httpRequest = new global::System.Net.Http.HttpRequestMessage( method: global::System.Net.Http.HttpMethod.Post, requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + "/generate", global::System.UriKind.RelativeOrAbsolute)); @@ -27,10 +44,25 @@ public partial class CompletionsClient encoding: global::System.Text.Encoding.UTF8, mediaType: "application/json"); + PrepareRequest( + client: _httpClient, + request: httpRequest); + PrepareGenerateCompletionRequest( + httpClient: _httpClient, + httpRequestMessage: httpRequest, + request: request); + using var response = await _httpClient.SendAsync( request: httpRequest, completionOption: global::System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: _httpClient, + response: response); + ProcessGenerateCompletionResponse( + httpClient: _httpClient, + httpResponseMessage: response); response.EnsureSuccessStatusCode(); using var stream = await response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); diff --git a/src/libs/Ollama/Generated/Ollama.CompletionsClient.g.cs b/src/libs/Ollama/Generated/Ollama.CompletionsClient.g.cs index c016186..969296d 100644 --- a/src/libs/Ollama/Generated/Ollama.CompletionsClient.g.cs +++ b/src/libs/Ollama/Generated/Ollama.CompletionsClient.g.cs @@ -39,5 +39,18 @@ public void Dispose() { _httpClient.Dispose(); } + + partial void PrepareArguments( + global::System.Net.Http.HttpClient client); + partial void PrepareRequest( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpRequestMessage request); + partial void ProcessResponse( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response); + partial void ProcessResponseContent( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response, + ref string content); } } \ No newline at end of file diff --git a/src/libs/Ollama/Generated/Ollama.EmbeddingsClient.GenerateEmbedding.g.cs b/src/libs/Ollama/Generated/Ollama.EmbeddingsClient.GenerateEmbedding.g.cs index 8b46f20..bb3415c 100644 --- a/src/libs/Ollama/Generated/Ollama.EmbeddingsClient.GenerateEmbedding.g.cs +++ b/src/libs/Ollama/Generated/Ollama.EmbeddingsClient.GenerateEmbedding.g.cs @@ -5,6 +5,22 @@ namespace Ollama { public partial class EmbeddingsClient { + partial void PrepareGenerateEmbeddingArguments( + global::System.Net.Http.HttpClient httpClient, + global::Ollama.GenerateEmbeddingRequest request); + partial void PrepareGenerateEmbeddingRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + global::Ollama.GenerateEmbeddingRequest request); + partial void ProcessGenerateEmbeddingResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessGenerateEmbeddingResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + /// /// Generate embeddings 
from a model. /// @@ -17,6 +33,12 @@ public partial class EmbeddingsClient { request = request ?? throw new global::System.ArgumentNullException(nameof(request)); + PrepareArguments( + client: _httpClient); + PrepareGenerateEmbeddingArguments( + httpClient: _httpClient, + request: request); + using var httpRequest = new global::System.Net.Http.HttpRequestMessage( method: global::System.Net.Http.HttpMethod.Post, requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + "/embeddings", global::System.UriKind.RelativeOrAbsolute)); @@ -26,13 +48,37 @@ public partial class EmbeddingsClient encoding: global::System.Text.Encoding.UTF8, mediaType: "application/json"); + PrepareRequest( + client: _httpClient, + request: httpRequest); + PrepareGenerateEmbeddingRequest( + httpClient: _httpClient, + httpRequestMessage: httpRequest, + request: request); + using var response = await _httpClient.SendAsync( request: httpRequest, completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, cancellationToken: cancellationToken).ConfigureAwait(false); + ProcessResponse( + client: _httpClient, + response: response); + ProcessGenerateEmbeddingResponse( + httpClient: _httpClient, + httpResponseMessage: response); + var __content = await response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + ProcessResponseContent( + client: _httpClient, + response: response, + content: ref __content); + ProcessGenerateEmbeddingResponseContent( + httpClient: _httpClient, + httpResponseMessage: response, + content: ref __content); + try { response.EnsureSuccessStatusCode(); diff --git a/src/libs/Ollama/Generated/Ollama.EmbeddingsClient.g.cs b/src/libs/Ollama/Generated/Ollama.EmbeddingsClient.g.cs index 5148e23..cc5eeda 100644 --- a/src/libs/Ollama/Generated/Ollama.EmbeddingsClient.g.cs +++ b/src/libs/Ollama/Generated/Ollama.EmbeddingsClient.g.cs @@ -39,5 +39,18 @@ public void Dispose() { _httpClient.Dispose(); } + + partial void PrepareArguments( + global::System.Net.Http.HttpClient client); + partial void PrepareRequest( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpRequestMessage request); + partial void ProcessResponse( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response); + partial void ProcessResponseContent( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response, + ref string content); } } \ No newline at end of file diff --git a/src/libs/Ollama/Generated/Ollama.Models.RequestOptions.g.cs b/src/libs/Ollama/Generated/Ollama.Models.RequestOptions.g.cs index a11326b..921b5cd 100644 --- a/src/libs/Ollama/Generated/Ollama.Models.RequestOptions.g.cs +++ b/src/libs/Ollama/Generated/Ollama.Models.RequestOptions.g.cs @@ -15,97 +15,112 @@ public sealed partial class RequestOptions public int? NumKeep { get; set; } /// - /// Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) + /// Sets the random number seed to use for generation. Setting this to a specific number will make the model
+ /// generate the same text for the same prompt. (Default: 0) ///
[global::System.Text.Json.Serialization.JsonPropertyName("seed")] public int? Seed { get; set; } /// - /// Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) + /// Maximum number of tokens to predict when generating text.
+ /// (Default: 128, -1 = infinite generation, -2 = fill context) ///
[global::System.Text.Json.Serialization.JsonPropertyName("num_predict")] public int? NumPredict { get; set; } /// - /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) + /// Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers,
+ /// while a lower value (e.g. 10) will be more conservative. (Default: 40) ///
[global::System.Text.Json.Serialization.JsonPropertyName("top_k")] public int? TopK { get; set; } /// - /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) + /// Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value
+ /// (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) ///
[global::System.Text.Json.Serialization.JsonPropertyName("top_p")] public float? TopP { get; set; } /// - /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) + /// Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value
+ /// (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) ///
[global::System.Text.Json.Serialization.JsonPropertyName("tfs_z")] public float? TfsZ { get; set; } /// - /// Typical p is used to reduce the impact of less probable tokens from the output. + /// Typical p is used to reduce the impact of less probable tokens from the output. (default: 1) /// [global::System.Text.Json.Serialization.JsonPropertyName("typical_p")] public float? TypicalP { get; set; } /// - /// Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) + /// Sets how far back for the model to look back to prevent repetition.
+ /// (Default: 64, 0 = disabled, -1 = num_ctx) ///
[global::System.Text.Json.Serialization.JsonPropertyName("repeat_last_n")] public int? RepeatLastN { get; set; } /// - /// The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) + /// The temperature of the model. Increasing the temperature will make the model answer more creatively.
+ /// (Default: 0.8) ///
[global::System.Text.Json.Serialization.JsonPropertyName("temperature")] public float? Temperature { get; set; } /// - /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) + /// Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more
+ /// strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) ///
[global::System.Text.Json.Serialization.JsonPropertyName("repeat_penalty")] public float? RepeatPenalty { get; set; } /// - /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// Positive values penalize new tokens based on whether they appear in the text so far, increasing the
+ /// model's likelihood to talk about new topics. (Default: 0) ///
[global::System.Text.Json.Serialization.JsonPropertyName("presence_penalty")] public float? PresencePenalty { get; set; } /// - /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the
+ /// model's likelihood to repeat the same line verbatim. (Default: 0) ///
[global::System.Text.Json.Serialization.JsonPropertyName("frequency_penalty")] public float? FrequencyPenalty { get; set; } /// - /// Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) + /// Enable Mirostat sampling for controlling perplexity.
+ /// (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) ///
[global::System.Text.Json.Serialization.JsonPropertyName("mirostat")] public int? Mirostat { get; set; } /// - /// Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) + /// Controls the balance between coherence and diversity of the output. A lower value will result in more
+ /// focused and coherent text. (Default: 5.0) ///
[global::System.Text.Json.Serialization.JsonPropertyName("mirostat_tau")] public float? MirostatTau { get; set; } /// - /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) + /// Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate
+ /// will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.
+ /// (Default: 0.1) ///
[global::System.Text.Json.Serialization.JsonPropertyName("mirostat_eta")] public float? MirostatEta { get; set; } /// - /// Penalize newlines in the output. (Default: false) + /// Penalize newlines in the output. (Default: true) /// [global::System.Text.Json.Serialization.JsonPropertyName("penalize_newline")] public bool? PenalizeNewline { get; set; } /// - /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + /// Sequences where the API will stop generating further tokens. The returned text will not contain the stop
+ /// sequence. ///
[global::System.Text.Json.Serialization.JsonPropertyName("stop")] public global::System.Collections.Generic.IList? Stop { get; set; } @@ -117,19 +132,20 @@ public sealed partial class RequestOptions public bool? Numa { get; set; } /// - /// Sets the size of the context window used to generate the next token. + /// Sets the size of the context window used to generate the next token. (Default: 2048) /// [global::System.Text.Json.Serialization.JsonPropertyName("num_ctx")] public int? NumCtx { get; set; } /// - /// Sets the number of batches to use for generation. (Default: 1) + /// Sets the number of batches to use for generation. (Default: 512) /// [global::System.Text.Json.Serialization.JsonPropertyName("num_batch")] public int? NumBatch { get; set; } /// - /// The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. + /// The number of layers to send to the GPU(s).
+ /// On macOS it defaults to 1 to enable metal support, 0 to disable. ///
[global::System.Text.Json.Serialization.JsonPropertyName("num_gpu")] public int? NumGpu { get; set; } @@ -177,7 +193,9 @@ public sealed partial class RequestOptions public bool? UseMlock { get; set; } /// - /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). + /// Sets the number of threads to use during computation. By default, Ollama will detect this for optimal
+ /// performance. It is recommended to set this value to the number of physical CPU cores your system has
+ /// (as opposed to the logical number of cores). ///
[global::System.Text.Json.Serialization.JsonPropertyName("num_thread")] public int? NumThread { get; set; } diff --git a/src/libs/Ollama/Generated/Ollama.ModelsClient.CheckBlob.g.cs b/src/libs/Ollama/Generated/Ollama.ModelsClient.CheckBlob.g.cs index 4bd65d4..8e1a0fe 100644 --- a/src/libs/Ollama/Generated/Ollama.ModelsClient.CheckBlob.g.cs +++ b/src/libs/Ollama/Generated/Ollama.ModelsClient.CheckBlob.g.cs @@ -5,6 +5,17 @@ namespace Ollama { public partial class ModelsClient { + partial void PrepareCheckBlobArguments( + global::System.Net.Http.HttpClient httpClient, + ref string digest); + partial void PrepareCheckBlobRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string digest); + partial void ProcessCheckBlobResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + /// /// Ensures that the file blob used for a FROM or ADAPTER field exists on the server.
/// This is checking your Ollama server and not Ollama.ai. @@ -16,14 +27,35 @@ public partial class ModelsClient string digest, global::System.Threading.CancellationToken cancellationToken = default) { + PrepareArguments( + client: _httpClient); + PrepareCheckBlobArguments( + httpClient: _httpClient, + digest: ref digest); + using var httpRequest = new global::System.Net.Http.HttpRequestMessage( method: global::System.Net.Http.HttpMethod.Head, requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + $"/blobs/{digest}", global::System.UriKind.RelativeOrAbsolute)); + PrepareRequest( + client: _httpClient, + request: httpRequest); + PrepareCheckBlobRequest( + httpClient: _httpClient, + httpRequestMessage: httpRequest, + digest: digest); + using var response = await _httpClient.SendAsync( request: httpRequest, completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: _httpClient, + response: response); + ProcessCheckBlobResponse( + httpClient: _httpClient, + httpResponseMessage: response); response.EnsureSuccessStatusCode(); } } diff --git a/src/libs/Ollama/Generated/Ollama.ModelsClient.CopyModel.g.cs b/src/libs/Ollama/Generated/Ollama.ModelsClient.CopyModel.g.cs index 444cb4d..b4ea103 100644 --- a/src/libs/Ollama/Generated/Ollama.ModelsClient.CopyModel.g.cs +++ b/src/libs/Ollama/Generated/Ollama.ModelsClient.CopyModel.g.cs @@ -5,6 +5,17 @@ namespace Ollama { public partial class ModelsClient { + partial void PrepareCopyModelArguments( + global::System.Net.Http.HttpClient httpClient, + global::Ollama.CopyModelRequest request); + partial void PrepareCopyModelRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + global::Ollama.CopyModelRequest request); + partial void ProcessCopyModelResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + /// /// Creates a model with another name from an existing model. /// @@ -17,6 +28,12 @@ public partial class ModelsClient { request = request ?? 
throw new global::System.ArgumentNullException(nameof(request)); + PrepareArguments( + client: _httpClient); + PrepareCopyModelArguments( + httpClient: _httpClient, + request: request); + using var httpRequest = new global::System.Net.Http.HttpRequestMessage( method: global::System.Net.Http.HttpMethod.Post, requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + "/copy", global::System.UriKind.RelativeOrAbsolute)); @@ -26,10 +43,25 @@ public partial class ModelsClient encoding: global::System.Text.Encoding.UTF8, mediaType: "application/json"); + PrepareRequest( + client: _httpClient, + request: httpRequest); + PrepareCopyModelRequest( + httpClient: _httpClient, + httpRequestMessage: httpRequest, + request: request); + using var response = await _httpClient.SendAsync( request: httpRequest, completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: _httpClient, + response: response); + ProcessCopyModelResponse( + httpClient: _httpClient, + httpResponseMessage: response); response.EnsureSuccessStatusCode(); } diff --git a/src/libs/Ollama/Generated/Ollama.ModelsClient.CreateBlob.g.cs b/src/libs/Ollama/Generated/Ollama.ModelsClient.CreateBlob.g.cs index 17fee15..252918b 100644 --- a/src/libs/Ollama/Generated/Ollama.ModelsClient.CreateBlob.g.cs +++ b/src/libs/Ollama/Generated/Ollama.ModelsClient.CreateBlob.g.cs @@ -5,6 +5,19 @@ namespace Ollama { public partial class ModelsClient { + partial void PrepareCreateBlobArguments( + global::System.Net.Http.HttpClient httpClient, + ref string digest, + byte[] request); + partial void PrepareCreateBlobRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string digest, + byte[] request); + partial void ProcessCreateBlobResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + /// /// Create a blob from a file. Returns the server file path. /// @@ -19,6 +32,13 @@ public partial class ModelsClient { request = request ?? 
throw new global::System.ArgumentNullException(nameof(request)); + PrepareArguments( + client: _httpClient); + PrepareCreateBlobArguments( + httpClient: _httpClient, + digest: ref digest, + request: request); + using var httpRequest = new global::System.Net.Http.HttpRequestMessage( method: global::System.Net.Http.HttpMethod.Post, requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + $"/blobs/{digest}", global::System.UriKind.RelativeOrAbsolute)); @@ -28,10 +48,26 @@ public partial class ModelsClient encoding: global::System.Text.Encoding.UTF8, mediaType: "application/json"); + PrepareRequest( + client: _httpClient, + request: httpRequest); + PrepareCreateBlobRequest( + httpClient: _httpClient, + httpRequestMessage: httpRequest, + digest: digest, + request: request); + using var response = await _httpClient.SendAsync( request: httpRequest, completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: _httpClient, + response: response); + ProcessCreateBlobResponse( + httpClient: _httpClient, + httpResponseMessage: response); response.EnsureSuccessStatusCode(); } diff --git a/src/libs/Ollama/Generated/Ollama.ModelsClient.CreateModel.g.cs b/src/libs/Ollama/Generated/Ollama.ModelsClient.CreateModel.g.cs index 09a4df6..3a74055 100644 --- a/src/libs/Ollama/Generated/Ollama.ModelsClient.CreateModel.g.cs +++ b/src/libs/Ollama/Generated/Ollama.ModelsClient.CreateModel.g.cs @@ -5,6 +5,17 @@ namespace Ollama { public partial class ModelsClient { + partial void PrepareCreateModelArguments( + global::System.Net.Http.HttpClient httpClient, + global::Ollama.CreateModelRequest request); + partial void PrepareCreateModelRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + global::Ollama.CreateModelRequest request); + partial void ProcessCreateModelResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + /// /// Create a model from a Modelfile.
/// It is recommended to set `modelfile` to the content of the Modelfile rather than just set `path`. This is a requirement for remote create. Remote model creation should also create any file blobs, fields such as `FROM` and `ADAPTER`, explicitly with the server using Create a Blob and the value to the path indicated in the response. @@ -18,6 +29,12 @@ public partial class ModelsClient { request = request ?? throw new global::System.ArgumentNullException(nameof(request)); + PrepareArguments( + client: _httpClient); + PrepareCreateModelArguments( + httpClient: _httpClient, + request: request); + using var httpRequest = new global::System.Net.Http.HttpRequestMessage( method: global::System.Net.Http.HttpMethod.Post, requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + "/create", global::System.UriKind.RelativeOrAbsolute)); @@ -27,10 +44,25 @@ public partial class ModelsClient encoding: global::System.Text.Encoding.UTF8, mediaType: "application/json"); + PrepareRequest( + client: _httpClient, + request: httpRequest); + PrepareCreateModelRequest( + httpClient: _httpClient, + httpRequestMessage: httpRequest, + request: request); + using var response = await _httpClient.SendAsync( request: httpRequest, completionOption: global::System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: _httpClient, + response: response); + ProcessCreateModelResponse( + httpClient: _httpClient, + httpResponseMessage: response); response.EnsureSuccessStatusCode(); using var stream = await response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); diff --git a/src/libs/Ollama/Generated/Ollama.ModelsClient.DeleteModel.g.cs b/src/libs/Ollama/Generated/Ollama.ModelsClient.DeleteModel.g.cs index 3449074..958ae70 100644 --- a/src/libs/Ollama/Generated/Ollama.ModelsClient.DeleteModel.g.cs +++ b/src/libs/Ollama/Generated/Ollama.ModelsClient.DeleteModel.g.cs @@ -5,6 +5,17 @@ namespace Ollama { public partial class ModelsClient { + partial void PrepareDeleteModelArguments( + global::System.Net.Http.HttpClient httpClient, + global::Ollama.DeleteModelRequest request); + partial void PrepareDeleteModelRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + global::Ollama.DeleteModelRequest request); + partial void ProcessDeleteModelResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + /// /// Delete a model and its data. /// @@ -17,6 +28,12 @@ public partial class ModelsClient { request = request ?? 
throw new global::System.ArgumentNullException(nameof(request)); + PrepareArguments( + client: _httpClient); + PrepareDeleteModelArguments( + httpClient: _httpClient, + request: request); + using var httpRequest = new global::System.Net.Http.HttpRequestMessage( method: global::System.Net.Http.HttpMethod.Delete, requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + "/delete", global::System.UriKind.RelativeOrAbsolute)); @@ -26,10 +43,25 @@ public partial class ModelsClient encoding: global::System.Text.Encoding.UTF8, mediaType: "application/json"); + PrepareRequest( + client: _httpClient, + request: httpRequest); + PrepareDeleteModelRequest( + httpClient: _httpClient, + httpRequestMessage: httpRequest, + request: request); + using var response = await _httpClient.SendAsync( request: httpRequest, completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: _httpClient, + response: response); + ProcessDeleteModelResponse( + httpClient: _httpClient, + httpResponseMessage: response); response.EnsureSuccessStatusCode(); } diff --git a/src/libs/Ollama/Generated/Ollama.ModelsClient.ListModels.g.cs b/src/libs/Ollama/Generated/Ollama.ModelsClient.ListModels.g.cs index f7bfde9..4fb2032 100644 --- a/src/libs/Ollama/Generated/Ollama.ModelsClient.ListModels.g.cs +++ b/src/libs/Ollama/Generated/Ollama.ModelsClient.ListModels.g.cs @@ -5,6 +5,20 @@ namespace Ollama { public partial class ModelsClient { + partial void PrepareListModelsArguments( + global::System.Net.Http.HttpClient httpClient); + partial void PrepareListModelsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage); + partial void ProcessListModelsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessListModelsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + /// /// List models that are available locally. 
/// @@ -13,17 +27,45 @@ public partial class ModelsClient public async global::System.Threading.Tasks.Task ListModelsAsync( global::System.Threading.CancellationToken cancellationToken = default) { + PrepareArguments( + client: _httpClient); + PrepareListModelsArguments( + httpClient: _httpClient); + using var httpRequest = new global::System.Net.Http.HttpRequestMessage( method: global::System.Net.Http.HttpMethod.Get, requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + "/tags", global::System.UriKind.RelativeOrAbsolute)); + PrepareRequest( + client: _httpClient, + request: httpRequest); + PrepareListModelsRequest( + httpClient: _httpClient, + httpRequestMessage: httpRequest); + using var response = await _httpClient.SendAsync( request: httpRequest, completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, cancellationToken: cancellationToken).ConfigureAwait(false); + ProcessResponse( + client: _httpClient, + response: response); + ProcessListModelsResponse( + httpClient: _httpClient, + httpResponseMessage: response); + var __content = await response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + ProcessResponseContent( + client: _httpClient, + response: response, + content: ref __content); + ProcessListModelsResponseContent( + httpClient: _httpClient, + httpResponseMessage: response, + content: ref __content); + try { response.EnsureSuccessStatusCode(); diff --git a/src/libs/Ollama/Generated/Ollama.ModelsClient.ListRunningModels.g.cs b/src/libs/Ollama/Generated/Ollama.ModelsClient.ListRunningModels.g.cs index df252eb..af90b78 100644 --- a/src/libs/Ollama/Generated/Ollama.ModelsClient.ListRunningModels.g.cs +++ b/src/libs/Ollama/Generated/Ollama.ModelsClient.ListRunningModels.g.cs @@ -5,6 +5,20 @@ namespace Ollama { public partial class ModelsClient { + partial void PrepareListRunningModelsArguments( + global::System.Net.Http.HttpClient httpClient); + partial void PrepareListRunningModelsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage); + partial void ProcessListRunningModelsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessListRunningModelsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + /// /// List models that are running. 
/// @@ -13,17 +27,45 @@ public partial class ModelsClient public async global::System.Threading.Tasks.Task ListRunningModelsAsync( global::System.Threading.CancellationToken cancellationToken = default) { + PrepareArguments( + client: _httpClient); + PrepareListRunningModelsArguments( + httpClient: _httpClient); + using var httpRequest = new global::System.Net.Http.HttpRequestMessage( method: global::System.Net.Http.HttpMethod.Get, requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + "/ps", global::System.UriKind.RelativeOrAbsolute)); + PrepareRequest( + client: _httpClient, + request: httpRequest); + PrepareListRunningModelsRequest( + httpClient: _httpClient, + httpRequestMessage: httpRequest); + using var response = await _httpClient.SendAsync( request: httpRequest, completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, cancellationToken: cancellationToken).ConfigureAwait(false); + ProcessResponse( + client: _httpClient, + response: response); + ProcessListRunningModelsResponse( + httpClient: _httpClient, + httpResponseMessage: response); + var __content = await response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + ProcessResponseContent( + client: _httpClient, + response: response, + content: ref __content); + ProcessListRunningModelsResponseContent( + httpClient: _httpClient, + httpResponseMessage: response, + content: ref __content); + try { response.EnsureSuccessStatusCode(); diff --git a/src/libs/Ollama/Generated/Ollama.ModelsClient.PullModel.g.cs b/src/libs/Ollama/Generated/Ollama.ModelsClient.PullModel.g.cs index 520a1cb..7908f92 100644 --- a/src/libs/Ollama/Generated/Ollama.ModelsClient.PullModel.g.cs +++ b/src/libs/Ollama/Generated/Ollama.ModelsClient.PullModel.g.cs @@ -5,6 +5,17 @@ namespace Ollama { public partial class ModelsClient { + partial void PreparePullModelArguments( + global::System.Net.Http.HttpClient httpClient, + global::Ollama.PullModelRequest request); + partial void PreparePullModelRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + global::Ollama.PullModelRequest request); + partial void ProcessPullModelResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + /// /// Download a model from the ollama library.
/// Cancelled pulls are resumed from where they left off, and multiple calls will share the same download progress. @@ -18,6 +29,12 @@ public partial class ModelsClient { request = request ?? throw new global::System.ArgumentNullException(nameof(request)); + PrepareArguments( + client: _httpClient); + PreparePullModelArguments( + httpClient: _httpClient, + request: request); + using var httpRequest = new global::System.Net.Http.HttpRequestMessage( method: global::System.Net.Http.HttpMethod.Post, requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + "/pull", global::System.UriKind.RelativeOrAbsolute)); @@ -27,10 +44,25 @@ public partial class ModelsClient encoding: global::System.Text.Encoding.UTF8, mediaType: "application/json"); + PrepareRequest( + client: _httpClient, + request: httpRequest); + PreparePullModelRequest( + httpClient: _httpClient, + httpRequestMessage: httpRequest, + request: request); + using var response = await _httpClient.SendAsync( request: httpRequest, completionOption: global::System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: _httpClient, + response: response); + ProcessPullModelResponse( + httpClient: _httpClient, + httpResponseMessage: response); response.EnsureSuccessStatusCode(); using var stream = await response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); diff --git a/src/libs/Ollama/Generated/Ollama.ModelsClient.PushModel.g.cs b/src/libs/Ollama/Generated/Ollama.ModelsClient.PushModel.g.cs index 912ac8a..e8a1848 100644 --- a/src/libs/Ollama/Generated/Ollama.ModelsClient.PushModel.g.cs +++ b/src/libs/Ollama/Generated/Ollama.ModelsClient.PushModel.g.cs @@ -5,6 +5,17 @@ namespace Ollama { public partial class ModelsClient { + partial void PreparePushModelArguments( + global::System.Net.Http.HttpClient httpClient, + global::Ollama.PushModelRequest request); + partial void PreparePushModelRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + global::Ollama.PushModelRequest request); + partial void ProcessPushModelResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + /// /// Upload a model to a model library.
/// Requires registering for ollama.ai and adding a public key first. @@ -18,6 +29,12 @@ public partial class ModelsClient { request = request ?? throw new global::System.ArgumentNullException(nameof(request)); + PrepareArguments( + client: _httpClient); + PreparePushModelArguments( + httpClient: _httpClient, + request: request); + using var httpRequest = new global::System.Net.Http.HttpRequestMessage( method: global::System.Net.Http.HttpMethod.Post, requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + "/push", global::System.UriKind.RelativeOrAbsolute)); @@ -27,10 +44,25 @@ public partial class ModelsClient encoding: global::System.Text.Encoding.UTF8, mediaType: "application/json"); + PrepareRequest( + client: _httpClient, + request: httpRequest); + PreparePushModelRequest( + httpClient: _httpClient, + httpRequestMessage: httpRequest, + request: request); + using var response = await _httpClient.SendAsync( request: httpRequest, completionOption: global::System.Net.Http.HttpCompletionOption.ResponseHeadersRead, cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: _httpClient, + response: response); + ProcessPushModelResponse( + httpClient: _httpClient, + httpResponseMessage: response); response.EnsureSuccessStatusCode(); using var stream = await response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); diff --git a/src/libs/Ollama/Generated/Ollama.ModelsClient.ShowModelInfo.g.cs b/src/libs/Ollama/Generated/Ollama.ModelsClient.ShowModelInfo.g.cs index d0482aa..8d3812e 100644 --- a/src/libs/Ollama/Generated/Ollama.ModelsClient.ShowModelInfo.g.cs +++ b/src/libs/Ollama/Generated/Ollama.ModelsClient.ShowModelInfo.g.cs @@ -5,6 +5,22 @@ namespace Ollama { public partial class ModelsClient { + partial void PrepareShowModelInfoArguments( + global::System.Net.Http.HttpClient httpClient, + global::Ollama.ModelInfoRequest request); + partial void PrepareShowModelInfoRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + global::Ollama.ModelInfoRequest request); + partial void ProcessShowModelInfoResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessShowModelInfoResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + /// /// Show details about a model including modelfile, template, parameters, license, and system prompt. /// @@ -17,6 +33,12 @@ public partial class ModelsClient { request = request ?? 
throw new global::System.ArgumentNullException(nameof(request)); + PrepareArguments( + client: _httpClient); + PrepareShowModelInfoArguments( + httpClient: _httpClient, + request: request); + using var httpRequest = new global::System.Net.Http.HttpRequestMessage( method: global::System.Net.Http.HttpMethod.Post, requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + "/show", global::System.UriKind.RelativeOrAbsolute)); @@ -26,13 +48,37 @@ public partial class ModelsClient encoding: global::System.Text.Encoding.UTF8, mediaType: "application/json"); + PrepareRequest( + client: _httpClient, + request: httpRequest); + PrepareShowModelInfoRequest( + httpClient: _httpClient, + httpRequestMessage: httpRequest, + request: request); + using var response = await _httpClient.SendAsync( request: httpRequest, completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, cancellationToken: cancellationToken).ConfigureAwait(false); + ProcessResponse( + client: _httpClient, + response: response); + ProcessShowModelInfoResponse( + httpClient: _httpClient, + httpResponseMessage: response); + var __content = await response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + ProcessResponseContent( + client: _httpClient, + response: response, + content: ref __content); + ProcessShowModelInfoResponseContent( + httpClient: _httpClient, + httpResponseMessage: response, + content: ref __content); + try { response.EnsureSuccessStatusCode(); diff --git a/src/libs/Ollama/Generated/Ollama.ModelsClient.g.cs b/src/libs/Ollama/Generated/Ollama.ModelsClient.g.cs index 9fd1387..33495c6 100644 --- a/src/libs/Ollama/Generated/Ollama.ModelsClient.g.cs +++ b/src/libs/Ollama/Generated/Ollama.ModelsClient.g.cs @@ -39,5 +39,18 @@ public void Dispose() { _httpClient.Dispose(); } + + partial void PrepareArguments( + global::System.Net.Http.HttpClient client); + partial void PrepareRequest( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpRequestMessage request); + partial void ProcessResponse( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response); + partial void ProcessResponseContent( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response, + ref string content); } } \ No newline at end of file diff --git a/src/libs/Ollama/Generated/Ollama.OllamaApiClient.GetVersion.g.cs b/src/libs/Ollama/Generated/Ollama.OllamaApiClient.GetVersion.g.cs index c90c4c3..0c87693 100644 --- a/src/libs/Ollama/Generated/Ollama.OllamaApiClient.GetVersion.g.cs +++ b/src/libs/Ollama/Generated/Ollama.OllamaApiClient.GetVersion.g.cs @@ -5,6 +5,20 @@ namespace Ollama { public partial class OllamaApiClient { + partial void PrepareGetVersionArguments( + global::System.Net.Http.HttpClient httpClient); + partial void PrepareGetVersionRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage); + partial void ProcessGetVersionResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessGetVersionResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + /// /// Returns the version of the Ollama server.
/// This endpoint returns the version of the Ollama server. @@ -14,17 +28,45 @@ public partial class OllamaApiClient public async global::System.Threading.Tasks.Task GetVersionAsync( global::System.Threading.CancellationToken cancellationToken = default) { + PrepareArguments( + client: _httpClient); + PrepareGetVersionArguments( + httpClient: _httpClient); + using var httpRequest = new global::System.Net.Http.HttpRequestMessage( method: global::System.Net.Http.HttpMethod.Get, requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + "/version", global::System.UriKind.RelativeOrAbsolute)); + PrepareRequest( + client: _httpClient, + request: httpRequest); + PrepareGetVersionRequest( + httpClient: _httpClient, + httpRequestMessage: httpRequest); + using var response = await _httpClient.SendAsync( request: httpRequest, completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, cancellationToken: cancellationToken).ConfigureAwait(false); + ProcessResponse( + client: _httpClient, + response: response); + ProcessGetVersionResponse( + httpClient: _httpClient, + httpResponseMessage: response); + var __content = await response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + ProcessResponseContent( + client: _httpClient, + response: response, + content: ref __content); + ProcessGetVersionResponseContent( + httpClient: _httpClient, + httpResponseMessage: response, + content: ref __content); + try { response.EnsureSuccessStatusCode(); diff --git a/src/libs/Ollama/Generated/Ollama.OllamaApiClient.g.cs b/src/libs/Ollama/Generated/Ollama.OllamaApiClient.g.cs index 4ff41d4..9607054 100644 --- a/src/libs/Ollama/Generated/Ollama.OllamaApiClient.g.cs +++ b/src/libs/Ollama/Generated/Ollama.OllamaApiClient.g.cs @@ -59,5 +59,18 @@ public void Dispose() { _httpClient.Dispose(); } + + partial void PrepareArguments( + global::System.Net.Http.HttpClient client); + partial void PrepareRequest( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpRequestMessage request); + partial void ProcessResponse( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response); + partial void ProcessResponseContent( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response, + ref string content); } } \ No newline at end of file
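
Note on the generated hooks above: every Prepare*/Process* member introduced in this diff is a C# partial method, so it compiles to nothing unless a matching implementation is supplied in another partial class declaration within the same project. The generic hooks (PrepareArguments, PrepareRequest, ProcessResponse, ProcessResponseContent) are added to ChatClient, CompletionsClient, EmbeddingsClient, ModelsClient, and OllamaApiClient, and each operation also gets operation-specific hooks that run immediately after the generic ones. A minimal sketch of what an implementation could look like for ChatClient follows; the extra header name and the logging call are illustrative assumptions, not part of this diff:

namespace Ollama
{
    public partial class ChatClient
    {
        // Runs before the outgoing request is sent; here we attach a custom header.
        partial void PrepareRequest(
            global::System.Net.Http.HttpClient client,
            global::System.Net.Http.HttpRequestMessage request)
        {
            request.Headers.TryAddWithoutValidation("X-Client", "my-app");
        }

        // Runs for the /chat operation only, after the generic PrepareRequest hook above.
        partial void PrepareGenerateChatCompletionRequest(
            global::System.Net.Http.HttpClient httpClient,
            global::System.Net.Http.HttpRequestMessage httpRequestMessage,
            global::Ollama.GenerateChatCompletionRequest request)
        {
            // Inspect or adjust the outgoing request here if needed.
        }

        // Runs once the response headers arrive, before EnsureSuccessStatusCode is called.
        partial void ProcessResponse(
            global::System.Net.Http.HttpClient client,
            global::System.Net.Http.HttpResponseMessage response)
        {
            global::System.Diagnostics.Debug.WriteLine(
                $"/chat responded with {(int)response.StatusCode}");
        }
    }
}

Because unimplemented partial void methods are removed by the compiler, leaving any of these hooks unused adds no runtime cost.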