diff --git a/src/libs/Directory.Build.props b/src/libs/Directory.Build.props index cd18603f..d1e85975 100755 --- a/src/libs/Directory.Build.props +++ b/src/libs/Directory.Build.props @@ -16,7 +16,7 @@ - 2.0.0-alpha.2 + 2.0.0-alpha.3 true true tryAGI and contributors diff --git a/src/libs/OpenAI.Constants/Chat/ChatModel.cs b/src/libs/OpenAI.Constants/Chat/ChatModel.cs index d16b66db..292e3398 100644 --- a/src/libs/OpenAI.Constants/Chat/ChatModel.cs +++ b/src/libs/OpenAI.Constants/Chat/ChatModel.cs @@ -13,40 +13,46 @@ namespace OpenAI.Constants; /// public readonly record struct ChatModel(string Value) { - /// - /// More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat.
- /// Will be updated with our latest model iteration 2 weeks after it is released.
- /// Max tokens: 8,192 tokens
- /// Training data: Up to Sep 2021
- /// On June 27th, 2023, gpt-4 will be updated to point from gpt-4-0314 to gpt-4-0613, the latest model iteration. - ///
+ /// + /// Currently points to gpt-4-0613. internal const string Gpt4Value = "gpt-4"; /// - /// Snapshot of gpt-4 from June 13th 2023 with function calling data.
- /// Unlike gpt-4, this model will not receive updates, and will be deprecated 3 months after a new version is released.
+ /// Snapshot of gpt-4 from June 13th 2023 with improved function calling support.
+ /// According https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
/// Max tokens: 8,192 tokens
/// Training data: Up to Sep 2021
///
internal const string Gpt4_0613Value = "gpt-4-0613"; - /// - /// Same capabilities as the base gpt-4 mode but with 4x the context length.
- /// Will be updated with our latest model iteration.
- /// Max tokens: 32,768 tokens
- /// Training data: Up to Sep 2021
- /// On June 27th, 2023, gpt-4 will be updated to point from gpt-4-0314 to gpt-4-0613, the latest model iteration. - ///
+ /// + /// Currently points to gpt-4-32k-0613. internal const string Gpt4_32kValue = "gpt-4-32k"; /// - /// Snapshot of gpt-4-32 from June 13th 2023.
- /// Unlike gpt-4-32k, this model will not receive updates, and will be deprecated 3 months after a new version is released.
+ /// Snapshot of gpt-4-32k from June 13th 2023 with improved function calling support.
+ /// According https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
/// Max tokens: 32,768 tokens
/// Training data: Up to Sep 2021
///
internal const string Gpt4_32k_0613Value = "gpt-4-32k-0613"; + /// + /// The latest GPT-4 model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Returns a maximum of 4,096 output tokens. This preview model is not yet suited for production traffic.
+ /// According https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
+ /// Max tokens: 128,000 tokens
+ /// Training data: Up to Apr 2023
+ ///
+ internal const string Gpt4_1106_PreviewValue = "gpt-4-1106-preview"; + + /// + /// Ability to understand images, in addition to all other GPT-4 Turbo capabilities. Returns a maximum of 4,096 output tokens. This is a preview model version and not suited yet for production traffic.
+ /// According https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
+ /// Max tokens: 128,000 tokens
+ /// Training data: Up to Apr 2023
+ ///
+ internal const string Gpt4VisionPreviewValue = "gpt-4-vision-preview"; + /// /// Most capable GPT-3.5 model and optimized for chat at 1/10th the cost of text-davinci-003.
/// Will be updated with our latest model iteration 2 weeks after it is released.
@@ -91,6 +97,12 @@ public readonly record struct ChatModel(string Value) /// public static ChatModel Gpt4_32k_0613 { get; } = new(Gpt4_32k_0613Value); + /// + public static ChatModel Gpt4_1106_Preview { get; } = new(Gpt4_1106_PreviewValue); + + /// + public static ChatModel Gpt4VisionPreview { get; } = new(Gpt4VisionPreviewValue); + /// public static ChatModel Gpt35Turbo { get; } = new(Gpt35TurboValue); diff --git a/src/libs/OpenAI.Constants/Chat/ChatPrices.cs b/src/libs/OpenAI.Constants/Chat/ChatPrices.cs index 2f61909a..7dd2eb2f 100644 --- a/src/libs/OpenAI.Constants/Chat/ChatPrices.cs +++ b/src/libs/OpenAI.Constants/Chat/ChatPrices.cs @@ -8,16 +8,16 @@ public static class ChatPrices { /// /// - /// - /// + /// + /// /// /// public static double? TryGet( ChatModel model, - int completionTokens, - int promptTokens) + int outputTokens, + int inputTokens) { - var (promptPricePerToken, completionPricePerToken) = model.Value switch + var (inputPricePerToken, outputPricePerToken) = model.Value switch { ChatModel.Gpt4Value => (0.03 * 0.001, 0.06 * 0.001), ChatModel.Gpt4_0613Value => (0.03 * 0.001, 0.06 * 0.001), @@ -25,6 +25,9 @@ public static class ChatPrices ChatModel.Gpt4_32kValue => (0.06 * 0.001, 0.12 * 0.001), ChatModel.Gpt4_32k_0613Value => (0.06 * 0.001, 0.12 * 0.001), + ChatModel.Gpt4_1106_PreviewValue => (0.01 * 0.001, 0.03 * 0.001), + ChatModel.Gpt4VisionPreviewValue => (0.01 * 0.001, 0.03 * 0.001), + ChatModel.Gpt35TurboValue => (0.0015 * 0.001, 0.002 * 0.001), ChatModel.Gpt35Turbo_0613Value => (0.0015 * 0.001, 0.002 * 0.001), @@ -33,25 +36,25 @@ public static class ChatPrices _ => (-1.0, -1.0) }; - if (promptPricePerToken < 0.0) + if (inputPricePerToken < 0.0) { return null; } - return completionTokens * completionPricePerToken + - promptTokens * promptPricePerToken; + return outputTokens * outputPricePerToken + + inputTokens * inputPricePerToken; } /// public static double Get( ChatModel model, - int completionTokens, - int promptTokens) + int outputTokens, + int inputTokens) { return TryGet( model: model, - completionTokens: completionTokens, - promptTokens: promptTokens) ?? + outputTokens: outputTokens, + inputTokens: inputTokens) ?? throw new NotImplementedException(); } } \ No newline at end of file diff --git a/src/libs/OpenAI.Constants/Context/ContextLengths.cs b/src/libs/OpenAI.Constants/Context/ContextLengths.cs index eec0f66b..9c338c6f 100644 --- a/src/libs/OpenAI.Constants/Context/ContextLengths.cs +++ b/src/libs/OpenAI.Constants/Context/ContextLengths.cs @@ -23,6 +23,9 @@ public static class ContextLengths ChatModel.Gpt4_32kValue => 32_768, ChatModel.Gpt4_32k_0613Value => 32_768, + ChatModel.Gpt4_1106_PreviewValue => 128_000, + ChatModel.Gpt4VisionPreviewValue => 128_000, + ChatModel.Gpt35TurboValue => 4_096, ChatModel.Gpt35Turbo_0613Value => 4_096, diff --git a/src/libs/OpenAI.Constants/Embedding/EmbeddingPrices.cs b/src/libs/OpenAI.Constants/Embedding/EmbeddingPrices.cs index c4377f0d..9b0e6bc4 100644 --- a/src/libs/OpenAI.Constants/Embedding/EmbeddingPrices.cs +++ b/src/libs/OpenAI.Constants/Embedding/EmbeddingPrices.cs @@ -3,46 +3,41 @@ namespace OpenAI.Constants; /// /// All prices in USD.
/// According https://openai.com/pricing/
-/// According https://platform.openai.com/docs/guides/embeddings/embedding-models <br/> +/// According https://platform.openai.com/docs/guides/embeddings/embedding-models
///
public static class EmbeddingPrices { /// /// - /// - /// + /// /// /// public static double? TryGet( EmbeddingModel model, - int completionTokens, - int promptTokens) + int tokens) { - var (promptPricePerToken, completionPricePerToken) = model.Value switch + var pricePerToken = model.Value switch { - EmbeddingModel.Ada002Value => (0.0001 * 0.001, 0.0), + EmbeddingModel.Ada002Value => 0.0001 * 0.001, - _ => (-1.0, -1.0) + _ => -1.0, }; - if (promptPricePerToken < 0.0) + if (pricePerToken < 0.0) { return null; } - return completionTokens * completionPricePerToken + - promptTokens * promptPricePerToken; + return tokens * pricePerToken; } - /// + /// public static double Get( EmbeddingModel model, - int completionTokens, - int promptTokens) + int tokens) { return TryGet( model: model, - completionTokens: completionTokens, - promptTokens: promptTokens) ?? + tokens: tokens) ?? throw new NotImplementedException(); } } \ No newline at end of file diff --git a/src/libs/OpenAI.Constants/FineTuning/FineTuningPrices.cs b/src/libs/OpenAI.Constants/FineTuning/FineTuningPrices.cs new file mode 100644 index 00000000..f3000e47 --- /dev/null +++ b/src/libs/OpenAI.Constants/FineTuning/FineTuningPrices.cs @@ -0,0 +1,52 @@ +namespace OpenAI.Constants; + +/// +/// All prices in USD.
+/// According https://openai.com/pricing/
+///
+public static class FineTuningPrices +{ + /// + /// + /// + /// + /// + /// + /// + public static double? TryGet( + ChatModel model, + int trainingTokens, + int inputTokens, + int outputTokens) + { + var (trainingPricePerToken, inputPricePerToken, outputPricePerToken) = model.Value switch + { + ChatModel.Gpt35TurboValue => (0.0080 * 0.001, 0.0030 * 0.001, 0.0060 * 0.001), + + _ => (-1.0, -1.0, -1.0), + }; + if (trainingPricePerToken < 0.0) + { + return null; + } + + return trainingTokens * trainingPricePerToken + + inputTokens * inputPricePerToken + + outputTokens * outputPricePerToken; + } + + /// + public static double Get( + ChatModel model, + int trainingTokens, + int inputTokens, + int outputTokens) + { + return TryGet( + model: model, + trainingTokens: trainingTokens, + inputTokens: inputTokens, + outputTokens: outputTokens) ?? + throw new NotImplementedException(); + } +} \ No newline at end of file diff --git a/src/libs/OpenAI.Constants/OpenAI.Constants.csproj b/src/libs/OpenAI.Constants/OpenAI.Constants.csproj index 49caa4fd..e0aa60cc 100644 --- a/src/libs/OpenAI.Constants/OpenAI.Constants.csproj +++ b/src/libs/OpenAI.Constants/OpenAI.Constants.csproj @@ -7,8 +7,11 @@ Static classes that provide actual constants(models/context lengts/prices) for the OpenAI API. Available types in OpenAI.Constants namespace: - - AudioModel - - AudioPrices + - SpeechToTextModel + - SpeechToTextPrices + - TextToSpeechModel + - TextToSpeechPrices + - FineTuningPrices - ChatModel - ChatPrices - EmbeddingModel diff --git a/src/libs/OpenAI.Constants/Audio/AudioModel.cs b/src/libs/OpenAI.Constants/SpeechToText/SpeechToTextModel.cs similarity index 71% rename from src/libs/OpenAI.Constants/Audio/AudioModel.cs rename to src/libs/OpenAI.Constants/SpeechToText/SpeechToTextModel.cs index efb6ae51..8cbe3d1e 100644 --- a/src/libs/OpenAI.Constants/Audio/AudioModel.cs +++ b/src/libs/OpenAI.Constants/SpeechToText/SpeechToTextModel.cs @@ -3,7 +3,7 @@ namespace OpenAI.Constants; /// /// According https://platform.openai.com/docs/guides/speech-to-text /// -public readonly record struct AudioModel(string Value) +public readonly record struct SpeechToTextModel(string Value) { /// /// Transcribe audio into whatever language the audio is in.
@@ -12,7 +12,7 @@ public readonly record struct AudioModel(string Value) internal const string Whisper1Value = "whisper-1"; /// - public static AudioModel Whisper1 { get; } = new(Whisper1Value); + public static SpeechToTextModel Whisper1 { get; } = new(Whisper1Value); /// public override string ToString() @@ -21,11 +21,11 @@ public override string ToString() } /// - /// Implicitly converts to . + /// Implicitly converts to . /// /// /// - public static implicit operator string(AudioModel model) + public static implicit operator string(SpeechToTextModel model) { return model.Value; } diff --git a/src/libs/OpenAI.Constants/Audio/AudioPrices.cs b/src/libs/OpenAI.Constants/SpeechToText/SpeechToTextModelPrices.cs similarity index 83% rename from src/libs/OpenAI.Constants/Audio/AudioPrices.cs rename to src/libs/OpenAI.Constants/SpeechToText/SpeechToTextModelPrices.cs index 121cf1ba..af64b352 100644 --- a/src/libs/OpenAI.Constants/Audio/AudioPrices.cs +++ b/src/libs/OpenAI.Constants/SpeechToText/SpeechToTextModelPrices.cs @@ -12,12 +12,12 @@ public static class AudioPrices /// /// public static double? TryGet( - AudioModel model, + SpeechToTextModel model, int seconds) { var pricePerMinute = model.Value switch { - AudioModel.Whisper1Value => 0.006, + SpeechToTextModel.Whisper1Value => 0.006, _ => -1.0, }; @@ -29,9 +29,9 @@ public static class AudioPrices return seconds * pricePerMinute / 60.0; } - /// + /// public static double Get( - AudioModel model, + SpeechToTextModel model, int seconds) { return TryGet( diff --git a/src/libs/OpenAI.Constants/TextToSpeech/TextToSpeechModel.cs b/src/libs/OpenAI.Constants/TextToSpeech/TextToSpeechModel.cs new file mode 100644 index 00000000..9238916d --- /dev/null +++ b/src/libs/OpenAI.Constants/TextToSpeech/TextToSpeechModel.cs @@ -0,0 +1,41 @@ +namespace OpenAI.Constants; + +/// +/// According https://platform.openai.com/docs/guides/text-to-speech +/// +public readonly record struct TextToSpeechModel(string Value) +{ + /// + /// Text-to-speech model optimized for speed and real-time use.
+ /// The maximum input length is 4,096 characters per request.
+ ///
+ internal const string Tts1Value = "tts-1"; + + /// + /// Text-to-speech model optimized for quality.
+ /// The maximum input length is 4,096 characters per request.
+ ///
+ internal const string Tts1HdValue = "tts-1-hd"; + + /// + public static TextToSpeechModel Tts1 { get; } = new(Tts1Value); + + /// + public static TextToSpeechModel Tts1Hd { get; } = new(Tts1HdValue); + + /// + public override string ToString() + { + return Value; + } + + /// + /// Implicitly converts to . + /// + /// + /// + public static implicit operator string(TextToSpeechModel model) + { + return model.Value; + } +} \ No newline at end of file diff --git a/src/libs/OpenAI.Constants/TextToSpeech/TextToSpeechPrices.cs b/src/libs/OpenAI.Constants/TextToSpeech/TextToSpeechPrices.cs new file mode 100644 index 00000000..5721b7fa --- /dev/null +++ b/src/libs/OpenAI.Constants/TextToSpeech/TextToSpeechPrices.cs @@ -0,0 +1,43 @@ +namespace OpenAI.Constants; + +/// +/// All prices in USD.
+/// According https://openai.com/pricing/
+///
+public static class TextToSpeechPrices +{ + /// + /// + /// + /// + /// + public static double? TryGet( + TextToSpeechModel model, + int characters) + { + var pricePerCharacter = model.Value switch + { + TextToSpeechModel.Tts1Value => 0.015 * 0.001, + TextToSpeechModel.Tts1HdValue => 0.030 * 0.001, + + _ => -1.0, + }; + if (pricePerCharacter < 0.0) + { + return null; + } + + return characters * pricePerCharacter; + } + + /// + public static double Get( + TextToSpeechModel model, + int characters) + { + return TryGet( + model: model, + characters: characters) ?? + throw new NotImplementedException(); + } +} \ No newline at end of file
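A minimal usage sketch, separate from the diff itself, of the renamed ChatPrices parameters together with the new GPT-4 Turbo constant added above; the token counts are invented purely for illustration.

using OpenAI.Constants;

// gpt-4-1106-preview is priced above at $0.01 per 1K input tokens and $0.03 per 1K output tokens,
// so 2,000 input tokens plus 500 output tokens cost roughly 0.02 + 0.015 = 0.035 USD.
double? chatCost = ChatPrices.TryGet(
    model: ChatModel.Gpt4_1106_Preview,
    outputTokens: 500,
    inputTokens: 2_000);

System.Console.WriteLine($"gpt-4-1106-preview: ~{chatCost} USD");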
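In the same spirit, a sketch of the new FineTuningPrices helper; the gpt-3.5-turbo rates are the ones in the switch above, and the token counts are again made up.

using OpenAI.Constants;

// $0.008 per 1K training tokens + $0.003 per 1K input + $0.006 per 1K output:
// 100_000 * 0.000008 + 10_000 * 0.000003 + 5_000 * 0.000006 ≈ 0.86 USD.
double? fineTuneCost = FineTuningPrices.TryGet(
    model: ChatModel.Gpt35Turbo,
    trainingTokens: 100_000,
    inputTokens: 10_000,
    outputTokens: 5_000);

// Models without a fine-tuning price fall through to the default arm: TryGet returns null and Get throws.
double? unsupported = FineTuningPrices.TryGet(
    model: ChatModel.Gpt4_32k_0613,
    trainingTokens: 1_000,
    inputTokens: 0,
    outputTokens: 0);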
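Finally, a sketch of the renamed speech-to-text constant and the new TextToSpeechPrices helper; the character count is arbitrary.

using OpenAI.Constants;

// tts-1-hd is billed above at $0.030 per 1K characters, so 10,000 characters cost roughly 0.30 USD.
double? ttsCost = TextToSpeechPrices.TryGet(
    model: TextToSpeechModel.Tts1Hd,
    characters: 10_000);

// AudioModel is now SpeechToTextModel; the implicit string conversion still yields "whisper-1".
string whisperId = SpeechToTextModel.Whisper1;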