diff --git a/LangChain.sln b/LangChain.sln
index 199827e5..c794dd7a 100644
--- a/LangChain.sln
+++ b/LangChain.sln
@@ -286,6 +286,18 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LangChain.Providers.Anthrop
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LangChain.Providers.Anthropic.Generators", "src\Providers\Anthropic\libs\Anthropic.Generator\LangChain.Providers.Anthropic.Generators.csproj", "{358ACF7D-0859-4009-BFC3-1F3B6FE4BB86}"
EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "DeepSeek", "DeepSeek", "{674D7125-B893-45FA-94D8-8CE0946578A5}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LangChain.Providers.DeepSeek", "src\Providers\DeepSeek\src\LangChain.Providers.DeepSeek.csproj", "{6217F9EB-35EE-4E36-BF29-B1E49CEDC5A5}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LangChain.Providers.DeepSeek.Tests", "src\Providers\DeepSeek\tests\LangChain.Providers.DeepSeek.Tests.csproj", "{33813E14-1889-49F5-AD4B-81EBABBFCC49}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "TogetherAi", "TogetherAi", "{CD001ADE-7B22-4325-9B3C-5C7D3B812797}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LangChain.Providers.TogetherAi", "src\Providers\TogetherAI\src\LangChain.Providers.TogetherAi.csproj", "{15D318BC-3954-4B32-B72A-B0FC97048EB3}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LangChain.Providers.TogetherAi.Tests", "src\Providers\TogetherAI\tests\LangChain.Providers.TogetherAi.Tests.csproj", "{0161A0F5-D55C-418A-BF54-AEE8EC73C52C}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -596,6 +608,22 @@ Global
{358ACF7D-0859-4009-BFC3-1F3B6FE4BB86}.Debug|Any CPU.Build.0 = Debug|Any CPU
{358ACF7D-0859-4009-BFC3-1F3B6FE4BB86}.Release|Any CPU.ActiveCfg = Release|Any CPU
{358ACF7D-0859-4009-BFC3-1F3B6FE4BB86}.Release|Any CPU.Build.0 = Release|Any CPU
+ {6217F9EB-35EE-4E36-BF29-B1E49CEDC5A5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {6217F9EB-35EE-4E36-BF29-B1E49CEDC5A5}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {6217F9EB-35EE-4E36-BF29-B1E49CEDC5A5}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {6217F9EB-35EE-4E36-BF29-B1E49CEDC5A5}.Release|Any CPU.Build.0 = Release|Any CPU
+ {33813E14-1889-49F5-AD4B-81EBABBFCC49}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {33813E14-1889-49F5-AD4B-81EBABBFCC49}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {33813E14-1889-49F5-AD4B-81EBABBFCC49}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {33813E14-1889-49F5-AD4B-81EBABBFCC49}.Release|Any CPU.Build.0 = Release|Any CPU
+ {15D318BC-3954-4B32-B72A-B0FC97048EB3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {15D318BC-3954-4B32-B72A-B0FC97048EB3}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {15D318BC-3954-4B32-B72A-B0FC97048EB3}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {15D318BC-3954-4B32-B72A-B0FC97048EB3}.Release|Any CPU.Build.0 = Release|Any CPU
+ {0161A0F5-D55C-418A-BF54-AEE8EC73C52C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {0161A0F5-D55C-418A-BF54-AEE8EC73C52C}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {0161A0F5-D55C-418A-BF54-AEE8EC73C52C}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {0161A0F5-D55C-418A-BF54-AEE8EC73C52C}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
@@ -713,6 +741,12 @@ Global
{CAE7BB48-A6FF-41A5-A418-D34E18A1BC00} = {7A2A589D-F8EF-4744-9BEE-B06A5F109851}
{2FB02C8A-4537-401D-9DAB-171E8562EB6A} = {7A2A589D-F8EF-4744-9BEE-B06A5F109851}
{358ACF7D-0859-4009-BFC3-1F3B6FE4BB86} = {7A2A589D-F8EF-4744-9BEE-B06A5F109851}
+ {674D7125-B893-45FA-94D8-8CE0946578A5} = {E2B9833C-0397-4FAF-A3A8-116E58749750}
+ {6217F9EB-35EE-4E36-BF29-B1E49CEDC5A5} = {674D7125-B893-45FA-94D8-8CE0946578A5}
+ {33813E14-1889-49F5-AD4B-81EBABBFCC49} = {674D7125-B893-45FA-94D8-8CE0946578A5}
+ {CD001ADE-7B22-4325-9B3C-5C7D3B812797} = {E2B9833C-0397-4FAF-A3A8-116E58749750}
+ {15D318BC-3954-4B32-B72A-B0FC97048EB3} = {CD001ADE-7B22-4325-9B3C-5C7D3B812797}
+ {0161A0F5-D55C-418A-BF54-AEE8EC73C52C} = {CD001ADE-7B22-4325-9B3C-5C7D3B812797}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {5C00D0F1-6138-4ED9-846B-97E43D6DFF1C}
diff --git a/src/Providers/DeepSeek/src/DeepSeekConfiguration.cs b/src/Providers/DeepSeek/src/DeepSeekConfiguration.cs
new file mode 100644
index 00000000..26b967cf
--- /dev/null
+++ b/src/Providers/DeepSeek/src/DeepSeekConfiguration.cs
@@ -0,0 +1,12 @@
+using LangChain.Providers.OpenAI;
+
+namespace LangChain.Providers.DeepSeek;
+
+///
+///
+public class DeepSeekConfiguration : OpenAiConfiguration
+{
+ ///
+ ///
+ public new const string SectionName = "DeepSeek";
+}
\ No newline at end of file
diff --git a/src/Providers/DeepSeek/src/DeepSeekModel.cs b/src/Providers/DeepSeek/src/DeepSeekModel.cs
new file mode 100644
index 00000000..a12789f0
--- /dev/null
+++ b/src/Providers/DeepSeek/src/DeepSeekModel.cs
@@ -0,0 +1,10 @@
+using LangChain.Providers.OpenAI;
+
+namespace LangChain.Providers.DeepSeek;
+
+///
+///
+public class DeepSeekModel(
+ DeepSeekProvider provider,
+ string id)
+ : OpenAiChatModel(provider, DeepSeekModels.GetModelById(id));
\ No newline at end of file
diff --git a/src/Providers/DeepSeek/src/DeepSeekModels.cs b/src/Providers/DeepSeek/src/DeepSeekModels.cs
new file mode 100644
index 00000000..246f376e
--- /dev/null
+++ b/src/Providers/DeepSeek/src/DeepSeekModels.cs
@@ -0,0 +1,31 @@
+using OpenAI.Constants;
+
+namespace LangChain.Providers.DeepSeek;
+
+public static class DeepSeekModels
+{
+ ///
+ /// Good at general tasks
+ /// Context Length 16k
+ ///
+ public const string DeepSeekChat = "deepseek-chat";
+
+ ///
+ /// Good at coding tasks
+ /// Context Length 16k
+ ///
+ public const string DeepSeekCoder = "deepseek-coder";
+
+ public static ChatModels GetModelById(string id)
+ {
+ switch (id)
+ {
+ case DeepSeekChat:
+ return new ChatModels(DeepSeekChat, 16 * 1000, 0, 0);
+ case DeepSeekCoder:
+ return new ChatModels(DeepSeekCoder, 16 * 1000, 0, 0);
+ default:
+ throw new ArgumentException("Not a valid DeepSeek model.", nameof(id));
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/Providers/DeepSeek/src/DeepSeekProvider.cs b/src/Providers/DeepSeek/src/DeepSeekProvider.cs
new file mode 100644
index 00000000..cf210368
--- /dev/null
+++ b/src/Providers/DeepSeek/src/DeepSeekProvider.cs
@@ -0,0 +1,14 @@
+using LangChain.Providers.OpenAI;
+
+namespace LangChain.Providers.DeepSeek;
+
+public class DeepSeekProvider : OpenAiProvider
+{
+ public DeepSeekProvider(DeepSeekConfiguration configuration) : base(configuration)
+ {
+ }
+
+ public DeepSeekProvider(string apiKey) : base(apiKey, "api.deepseek.com")
+ {
+ }
+}
\ No newline at end of file
diff --git a/src/Providers/DeepSeek/src/LangChain.Providers.DeepSeek.csproj b/src/Providers/DeepSeek/src/LangChain.Providers.DeepSeek.csproj
new file mode 100644
index 00000000..3ce21a48
--- /dev/null
+++ b/src/Providers/DeepSeek/src/LangChain.Providers.DeepSeek.csproj
@@ -0,0 +1,21 @@
+
+
+
+ net4.6.2;netstandard2.0;net6.0;net7.0;net8.0
+
+
+
+
+
+
+
+ DeepSeek Chat model provider.
+ $(PackageTags);deepseek;api
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/Providers/DeepSeek/src/Predefined/DeepSeek.cs b/src/Providers/DeepSeek/src/Predefined/DeepSeek.cs
new file mode 100644
index 00000000..589ee145
--- /dev/null
+++ b/src/Providers/DeepSeek/src/Predefined/DeepSeek.cs
@@ -0,0 +1,9 @@
+namespace LangChain.Providers.DeepSeek.Predefined;
+
+///
+public class DeepSeekChatModel(DeepSeekProvider provider)
+ : DeepSeekModel(provider, DeepSeekModels.DeepSeekChat);
+
+///
+public class DeepSeekCoderModel(DeepSeekProvider provider)
+ : DeepSeekModel(provider, DeepSeekModels.DeepSeekCoder);
\ No newline at end of file
diff --git a/src/Providers/DeepSeek/tests/DeepSeekTests.cs b/src/Providers/DeepSeek/tests/DeepSeekTests.cs
new file mode 100644
index 00000000..1a4e2630
--- /dev/null
+++ b/src/Providers/DeepSeek/tests/DeepSeekTests.cs
@@ -0,0 +1,40 @@
+using LangChain.Providers.DeepSeek.Predefined;
+
+namespace LangChain.Providers.DeepSeek.Tests;
+
+[TestFixture]
+[Explicit]
+public class DeepSeekTests
+{
+ [Test]
+ public async Task ShouldGenerateFine_WithChatModel()
+ {
+ var apiKey =
+ Environment.GetEnvironmentVariable("DeepSeek_API_Key", EnvironmentVariableTarget.User) ??
+ throw new InvalidOperationException("DeepSeek_API_Key is not set");
+
+ var model = new DeepSeekChatModel(new DeepSeekProvider(apiKey));
+
+ var result = await model.GenerateAsync("Write a Poem".AsHumanMessage());
+
+ result.Messages.Count.Should().BeGreaterThan(0);
+ result.Messages.Last().Content.Should().NotBeNullOrEmpty();
+ Console.WriteLine(result.LastMessageContent);
+ }
+
+ [Test]
+ public async Task ShouldGenerateFine_With_CoderModel()
+ {
+ var apiKey =
+ Environment.GetEnvironmentVariable("DeepSeek_API_Key", EnvironmentVariableTarget.User) ??
+ throw new InvalidOperationException("DeepSeek_API_Key is not set");
+
+ var model = new DeepSeekCoderModel(new DeepSeekProvider(apiKey));
+
+ var result = await model.GenerateAsync("Write a python script to count from 0 to 100".AsHumanMessage());
+
+ result.Messages.Count.Should().BeGreaterThan(0);
+ result.Messages.Last().Content.Should().NotBeNullOrEmpty();
+ Console.WriteLine(result.LastMessageContent);
+ }
+}
\ No newline at end of file
diff --git a/src/Providers/DeepSeek/tests/LangChain.Providers.DeepSeek.Tests.csproj b/src/Providers/DeepSeek/tests/LangChain.Providers.DeepSeek.Tests.csproj
new file mode 100644
index 00000000..cc9d4f04
--- /dev/null
+++ b/src/Providers/DeepSeek/tests/LangChain.Providers.DeepSeek.Tests.csproj
@@ -0,0 +1,11 @@
+
+
+
+ net8.0
+
+
+
+
+
+
+
diff --git a/src/Providers/OpenRouter/src/OpenRouterModel.cs b/src/Providers/OpenRouter/src/OpenRouterModel.cs
index ad631ffb..f4129772 100644
--- a/src/Providers/OpenRouter/src/OpenRouterModel.cs
+++ b/src/Providers/OpenRouter/src/OpenRouterModel.cs
@@ -20,14 +20,5 @@ public OpenRouterModel(OpenRouterProvider provider,
{
}
- #region MyRegion
-
- protected override Task CallFunctionsAsync(global::OpenAI.Chat.Message message, List messages, CancellationToken cancellationToken = default)
- {
- if (!this.Id.Contains("openai/"))
- throw new NotImplementedException("Function calling is only supported with OpenAI Models.");
- return base.CallFunctionsAsync(message, messages, cancellationToken);
- }
-
- #endregion
+
}
\ No newline at end of file
diff --git a/src/Providers/TogetherAI/src/LangChain.Providers.TogetherAi.csproj b/src/Providers/TogetherAI/src/LangChain.Providers.TogetherAi.csproj
new file mode 100644
index 00000000..d805f5b6
--- /dev/null
+++ b/src/Providers/TogetherAI/src/LangChain.Providers.TogetherAi.csproj
@@ -0,0 +1,22 @@
+
+
+
+ net4.6.2;netstandard2.0;net6.0;net7.0;net8.0
+
+
+
+
+
+
+
+ Together.ai Chat model provider.
+ $(PackageTags);together;ai;together.ai;api
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/Providers/TogetherAI/src/Predefined/AllModels.cs b/src/Providers/TogetherAI/src/Predefined/AllModels.cs
new file mode 100644
index 00000000..e279c619
--- /dev/null
+++ b/src/Providers/TogetherAI/src/Predefined/AllModels.cs
@@ -0,0 +1,287 @@
+namespace LangChain.Providers.TogetherAi.Predefined;
+
+///
+/// TogetherAi Provider Instance
+public class ChronosHermes13BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.ChronosHermes13B);
+
+///
+/// TogetherAi Provider Instance
+public class MythomaxL213BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.MythomaxL213B);
+
+///
+/// TogetherAi Provider Instance
+public class NousCapybaraV197BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.NousCapybaraV197B);
+
+///
+/// TogetherAi Provider Instance
+public class NousHermes2MistralDpo7BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.NousHermes2MistralDpo7B);
+
+///
+/// TogetherAi Provider Instance
+public class NousHermes2Mixtral8X7BDpoModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.NousHermes2Mixtral8X7BDpo);
+
+///
+/// TogetherAi Provider Instance
+public class NousHermes2Mixtral8X7BSftModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.NousHermes2Mixtral8X7BSft);
+
+///
+/// TogetherAi Provider Instance
+public class NousHermes2Yi34BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.NousHermes2Yi34B);
+
+///
+/// TogetherAi Provider Instance
+public class NousHermesLlama213BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.NousHermesLlama213B);
+
+///
+/// TogetherAi Provider Instance
+public class NousHermesLlama27BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.NousHermesLlama27B);
+
+///
+/// TogetherAi Provider Instance
+public class OpenOrcaMistral7B8KModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.OpenOrcaMistral7B8K);
+
+///
+/// TogetherAi Provider Instance
+public class PhindCodeLlamaV234BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.PhindCodeLlamaV234B);
+
+///
+/// TogetherAi Provider Instance
+public class Qwen15Chat05BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Qwen15Chat05B);
+
+///
+/// TogetherAi Provider Instance
+public class Qwen15Chat18BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Qwen15Chat18B);
+
+///
+/// TogetherAi Provider Instance
+public class Qwen15Chat14BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Qwen15Chat14B);
+
+///
+/// TogetherAi Provider Instance
+public class Qwen15Chat32BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Qwen15Chat32B);
+
+///
+/// TogetherAi Provider Instance
+public class Qwen15Chat4BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Qwen15Chat4B);
+
+///
+/// TogetherAi Provider Instance
+public class Qwen15Chat72BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Qwen15Chat72B);
+
+///
+/// TogetherAi Provider Instance
+public class Qwen15Chat7BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Qwen15Chat7B);
+
+///
+/// TogetherAi Provider Instance
+public class RemmSlerpL213BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.RemmSlerpL213B);
+
+///
+/// TogetherAi Provider Instance
+public class ToppyM7BModel(TogetherAiProvider provider) : TogetherAiModel(provider, TogetherAiModelIds.ToppyM7B);
+
+///
+/// TogetherAi Provider Instance
+public class WizardcoderV1015BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.WizardcoderV1015B);
+
+///
+/// TogetherAi Provider Instance
+public class WizardcoderPythonV1034BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.WizardcoderPythonV1034B);
+
+///
+/// TogetherAi Provider Instance
+public class WizardlmV1213BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.WizardlmV1213B);
+
+///
+/// TogetherAi Provider Instance
+public class OlmoInstruct7BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.OlmoInstruct7B);
+
+///
+/// TogetherAi Provider Instance
+public class CodeLlamaInstruct13BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.CodeLlamaInstruct13B);
+
+///
+/// TogetherAi Provider Instance
+public class CodeLlamaPython13BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.CodeLlamaPython13B);
+
+///
+/// TogetherAi Provider Instance
+public class CodeLlamaInstruct34BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.CodeLlamaInstruct34B);
+
+///
+/// TogetherAi Provider Instance
+public class CodeLlamaPython34BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.CodeLlamaPython34B);
+
+///
+/// TogetherAi Provider Instance
+public class CodeLlamaInstruct70BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.CodeLlamaInstruct70B);
+
+///
+/// TogetherAi Provider Instance
+public class CodeLlamaPython70BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.CodeLlamaPython70B);
+
+///
+/// TogetherAi Provider Instance
+public class CodeLlama70BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.CodeLlama70B);
+
+///
+/// TogetherAi Provider Instance
+public class CodeLlamaInstruct7BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.CodeLlamaInstruct7B);
+
+///
+/// TogetherAi Provider Instance
+public class CodeLlamaPython7BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.CodeLlamaPython7B);
+
+///
+/// TogetherAi Provider Instance
+public class Dolphin25Mixtral8X7BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Dolphin25Mixtral8X7B);
+
+///
+/// TogetherAi Provider Instance
+public class DeepseekCoderInstruct33BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.DeepseekCoderInstruct33B);
+
+///
+/// TogetherAi Provider Instance
+public class DeepseekLlmChat67BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.DeepseekLlmChat67B);
+
+///
+/// TogetherAi Provider Instance
+public class Platypus2Instruct70BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Platypus2Instruct70B);
+
+///
+/// TogetherAi Provider Instance
+public class GemmaInstruct2BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.GemmaInstruct2B);
+
+///
+/// TogetherAi Provider Instance
+public class GemmaInstruct7BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.GemmaInstruct7B);
+
+///
+/// TogetherAi Provider Instance
+public class VicunaV1513BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.VicunaV1513B);
+
+///
+/// TogetherAi Provider Instance
+public class VicunaV157BModel(TogetherAiProvider provider) : TogetherAiModel(provider, TogetherAiModelIds.VicunaV157B);
+
+///
+/// TogetherAi Provider Instance
+public class Llama2Chat13BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Llama2Chat13B);
+
+///
+/// TogetherAi Provider Instance
+public class Llama2Chat70BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Llama2Chat70B);
+
+///
+/// TogetherAi Provider Instance
+public class Llama2Chat7BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Llama2Chat7B);
+
+///
+/// TogetherAi Provider Instance
+public class Mistral7BInstructModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Mistral7BInstruct);
+
+///
+/// TogetherAi Provider Instance
+public class Mistral7BInstructV02Model(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Mistral7BInstructV02);
+
+///
+/// TogetherAi Provider Instance
+public class Mixtral8X7BInstructV01Model(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Mixtral8X7BInstructV01);
+
+///
+/// TogetherAi Provider Instance
+public class OpenChat35Model(TogetherAiProvider provider) : TogetherAiModel(provider, TogetherAiModelIds.OpenChat35);
+
+///
+/// TogetherAi Provider Instance
+public class SnorkelMistralPairrmDpo7BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.SnorkelMistralPairrmDpo7B);
+
+///
+/// TogetherAi Provider Instance
+public class OpenHermes2Mistral7BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.OpenHermes2Mistral7B);
+
+///
+/// TogetherAi Provider Instance
+public class OpenHermes25Mistral7BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.OpenHermes25Mistral7B);
+
+///
+/// TogetherAi Provider Instance
+public class Llama27B32KInstruct7BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.Llama27B32KInstruct7B);
+
+///
+/// TogetherAi Provider Instance
+public class RedpajamaInciteChat7BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.RedpajamaInciteChat7B);
+
+///
+/// TogetherAi Provider Instance
+public class RedpajamaInciteChat3BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.RedpajamaInciteChat3B);
+
+///
+/// TogetherAi Provider Instance
+public class StripedhyenaNous7BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.StripedhyenaNous7B);
+
+///
+/// TogetherAi Provider Instance
+public class Alpaca7BModel(TogetherAiProvider provider) : TogetherAiModel(provider, TogetherAiModelIds.Alpaca7B);
+
+///
+/// TogetherAi Provider Instance
+public class UpstageSolarInstructV111BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds.UpstageSolarInstructV111B);
+
+///
+/// TogetherAi Provider Instance
+public class _01AiYiChat34BModel(TogetherAiProvider provider)
+ : TogetherAiModel(provider, TogetherAiModelIds._01AiYiChat34B);
\ No newline at end of file
diff --git a/src/Providers/TogetherAI/src/TogetherAiConfiguration.cs b/src/Providers/TogetherAI/src/TogetherAiConfiguration.cs
new file mode 100644
index 00000000..ec9d95ba
--- /dev/null
+++ b/src/Providers/TogetherAI/src/TogetherAiConfiguration.cs
@@ -0,0 +1,12 @@
+using LangChain.Providers.OpenAI;
+
+namespace LangChain.Providers.TogetherAi;
+
+///
+///
+public class TogetherAiConfiguration : OpenAiConfiguration
+{
+ ///
+ ///
+ public new const string SectionName = "TogetherAi";
+}
\ No newline at end of file
diff --git a/src/Providers/TogetherAI/src/TogetherAiModel.cs b/src/Providers/TogetherAI/src/TogetherAiModel.cs
new file mode 100644
index 00000000..dce52625
--- /dev/null
+++ b/src/Providers/TogetherAI/src/TogetherAiModel.cs
@@ -0,0 +1,23 @@
+using LangChain.Providers.OpenAI;
+using LangChain.Providers.OpenRouter;
+using OpenAI.Constants;
+
+namespace LangChain.Providers.TogetherAi;
+
+///
+///
+public class TogetherAiModel(TogetherAiProvider provider, ChatModels model) : OpenAiChatModel(provider, model)
+{
+ public TogetherAiModel(TogetherAiProvider provider,
+ TogetherAiModelIds id) : this(provider, TogetherAiModelProvider.GetModelById(id))
+ {
+ }
+
+ public TogetherAiModel(TogetherAiProvider provider, string id) : this(provider, new ChatModels(
+ id,
+ 0,
+ PricePerOutputTokenInUsd: 0.0,
+ PricePerInputTokenInUsd: 0.0))
+ {
+ }
+}
\ No newline at end of file
diff --git a/src/Providers/TogetherAI/src/TogetherAiModelIds.cs b/src/Providers/TogetherAI/src/TogetherAiModelIds.cs
new file mode 100644
index 00000000..a630713f
--- /dev/null
+++ b/src/Providers/TogetherAI/src/TogetherAiModelIds.cs
@@ -0,0 +1,763 @@
+namespace LangChain.Providers.TogetherAi;
+
+///
+/// List of all the Predefined TogetherAi Models
+///
+public enum TogetherAiModelIds
+{
+ ///
+ /// Name: Chronos Hermes (13B)
+ /// Organization: Austism
+ /// Context Length: 2048
+ /// Prompt Cost: $0.3/MTok
+ /// Completion Cost: $0.3/MTok
+ /// Description: This model is a 75/25 merge of Chronos (13B) and Nous Hermes (13B) models resulting in having a great
+ /// ability to produce evocative storywriting and follow a narrative.
+ /// HuggingFace Url:
+ /// https://huggingface.co/Austism/chronos-hermes-13b
+ ///
+ ChronosHermes13B = 0,
+
+ ///
+ /// Name: MythoMax-L2 (13B)
+ /// Organization: Gryphe
+ /// Context Length: 4096
+ /// Prompt Cost: $0.3/MTok
+ /// Completion Cost: $0.3/MTok
+ /// Description: MythoLogic-L2 and Huginn merge using a highly experimental tensor type merge technique. The main
+ /// difference with MythoMix is that I allowed more of Huginn to intermingle with the single tensors located at the
+ /// front and end of a model
+ /// HuggingFace Url:
+ /// https://huggingface.co/Gryphe/MythoMax-L2-13b
+ ///
+ MythomaxL213B = 3,
+
+ ///
+ /// Name: Nous Capybara v1.9 (7B)
+ /// Organization: NousResearch
+ /// Context Length: 8192
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: first Nous collection of dataset and models made by fine-tuning mostly on data created by Nous
+ /// in-house
+ /// HuggingFace Url:
+ /// https://huggingface.co/NousResearch/Nous-Capybara-7B-V1p9
+ ///
+ NousCapybaraV197B = 6,
+
+ ///
+ /// Name: Nous Hermes 2 - Mistral DPO (7B)
+ /// Organization: NousResearch
+ /// Context Length: 32768
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: Nous Hermes 2 on Mistral 7B DPO is the new flagship 7B Hermes! This model was DPO'd from
+ /// Teknium/OpenHermes-2.5-Mistral-7B and has improved across the board on all benchmarks tested - AGIEval, BigBench
+ /// Reasoning, GPT4All, and TruthfulQA.
+ /// HuggingFace Url:
+ /// https://huggingface.co/NousResearch/Nous-Hermes-2-Mistral-7B-DPO
+ ///
+ NousHermes2MistralDpo7B = 7,
+
+ ///
+ /// Name: Nous Hermes 2 - Mixtral 8x7B-DPO
+ /// Organization: NousResearch
+ /// Context Length: 32768
+ /// Prompt Cost: $0.6/MTok
+ /// Completion Cost: $0.6/MTok
+ /// Description: Nous Hermes 2 Mixtral 7bx8 DPO is the new flagship Nous Research model trained over the Mixtral 7bx8
+ /// MoE LLM. The model was trained on over 1,000,000 entries of primarily GPT-4 generated data, as well as other high
+ /// quality data from open datasets across the AI landscape, achieving state of the art performance on a variety of
+ /// tasks.
+ /// HuggingFace Url:
+ /// https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO
+ ///
+ NousHermes2Mixtral8X7BDpo = 8,
+
+ ///
+ /// Name: Nous Hermes 2 - Mixtral 8x7B-SFT
+ /// Organization: NousResearch
+ /// Context Length: 32768
+ /// Prompt Cost: $0.6/MTok
+ /// Completion Cost: $0.6/MTok
+ /// Description: Nous Hermes 2 Mixtral 7bx8 SFT is the new flagship Nous Research model trained over the Mixtral 7bx8
+ /// MoE LLM. The model was trained on over 1,000,000 entries of primarily GPT-4 generated data, as well as other high
+ /// quality data from open datasets across the AI landscape, achieving state of the art performance on a variety of
+ /// tasks.
+ /// HuggingFace Url:
+ /// https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT
+ ///
+ NousHermes2Mixtral8X7BSft = 9,
+
+ ///
+ /// Name: Nous Hermes-2 Yi (34B)
+ /// Organization: NousResearch
+ /// Context Length: 4096
+ /// Prompt Cost: $0.8/MTok
+ /// Completion Cost: $0.8/MTok
+ /// Description: Nous Hermes 2 - Yi-34B is a state of the art Yi Fine-tune
+ /// HuggingFace Url:
+ /// https://huggingface.co/NousResearch/Nous-Hermes-2-Yi-34B
+ ///
+ NousHermes2Yi34B = 10,
+
+ ///
+ /// Name: Nous Hermes Llama-2 (13B)
+ /// Organization: NousResearch
+ /// Context Length: 4096
+ /// Prompt Cost: $0.3/MTok
+ /// Completion Cost: $0.3/MTok
+ /// Description: Nous-Hermes-Llama2-13b is a state-of-the-art language model fine-tuned on over 300,000 instructions.
+ ///
+ /// HuggingFace Url:
+ /// https://huggingface.co/NousResearch/Nous-Hermes-Llama2-13b
+ ///
+ NousHermesLlama213B = 11,
+
+ ///
+ /// Name: Nous Hermes LLaMA-2 (7B)
+ /// Organization: NousResearch
+ /// Context Length: 4096
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: Nous-Hermes-Llama2-7b is a state-of-the-art language model fine-tuned on over 300,000 instructions.
+ ///
+ /// HuggingFace Url:
+ /// https://huggingface.co/NousResearch/Nous-Hermes-llama-2-7b
+ ///
+ NousHermesLlama27B = 12,
+
+ ///
+ /// Name: OpenOrca Mistral (7B) 8K
+ /// Organization: OpenOrca
+ /// Context Length: 8192
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: An OpenOrca dataset fine-tune on top of Mistral 7B by the OpenOrca team.
+ /// HuggingFace Url:
+ /// https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca
+ ///
+ OpenOrcaMistral7B8K = 13,
+
+ ///
+ /// Name: Phind Code LLaMA v2 (34B)
+ /// Organization: Phind
+ /// Context Length: 16384
+ /// Prompt Cost: $0.8/MTok
+ /// Completion Cost: $0.8/MTok
+ /// Description: Phind-CodeLlama-34B-v1 trained on additional 1.5B tokens high-quality programming-related data
+ /// proficient in Python, C/C++, TypeScript, Java, and more.
+ /// HuggingFace Url:
+ /// https://huggingface.co/Phind/Phind-CodeLlama-34B-v2
+ ///
+ PhindCodeLlamaV234B = 14,
+
+ ///
+ /// Name: Qwen 1.5 Chat (0.5B)
+ /// Organization: Qwen
+ /// Context Length: 32768
+ /// Prompt Cost: $0.1/MTok
+ /// Completion Cost: $0.1/MTok
+ /// Description: Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a
+ /// large amount of data. In comparison with the previous released Qwen.
+ /// HuggingFace Url:
+ /// https://huggingface.co/Qwen/Qwen1.5-0.5B-Chat
+ ///
+ Qwen15Chat05B = 15,
+
+ ///
+ /// Name: Qwen 1.5 Chat (1.8B)
+ /// Organization: Qwen
+ /// Context Length: 32768
+ /// Prompt Cost: $0.1/MTok
+ /// Completion Cost: $0.1/MTok
+ /// Description: Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a
+ /// large amount of data. In comparison with the previous released Qwen.
+ /// HuggingFace Url:
+ /// https://huggingface.co/Qwen/Qwen1.5-1.8B-Chat
+ ///
+ Qwen15Chat18B = 17,
+
+ ///
+ /// Name: Qwen 1.5 Chat (14B)
+ /// Organization: Qwen
+ /// Context Length: 32768
+ /// Prompt Cost: $0.3/MTok
+ /// Completion Cost: $0.3/MTok
+ /// Description: Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a
+ /// large amount of data. In comparison with the previous released Qwen.
+ /// HuggingFace Url:
+ /// https://huggingface.co/Qwen/Qwen1.5-14B-Chat
+ ///
+ Qwen15Chat14B = 19,
+
+ ///
+ /// Name: Qwen 1.5 Chat (32B)
+ /// Organization: Qwen
+ /// Context Length: 32768
+ /// Prompt Cost: $0.8/MTok
+ /// Completion Cost: $0.8/MTok
+ /// Description: Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a
+ /// large amount of data. In comparison with the previous released Qwen.
+ /// HuggingFace Url:
+ /// https://huggingface.co/Qwen/Qwen1.5-32B-Chat
+ ///
+ Qwen15Chat32B = 21,
+
+ ///
+ /// Name: Qwen 1.5 Chat (4B)
+ /// Organization: Qwen
+ /// Context Length: 32768
+ /// Prompt Cost: $0.1/MTok
+ /// Completion Cost: $0.1/MTok
+ /// Description: Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a
+ /// large amount of data. In comparison with the previous released Qwen.
+ /// HuggingFace Url:
+ /// https://huggingface.co/Qwen/Qwen1.5-4B-Chat
+ ///
+ Qwen15Chat4B = 23,
+
+ ///
+ /// Name: Qwen 1.5 Chat (72B)
+ /// Organization: Qwen
+ /// Context Length: 32768
+ /// Prompt Cost: $0.9/MTok
+ /// Completion Cost: $0.9/MTok
+ /// Description: Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a
+ /// large amount of data. In comparison with the previous released Qwen.
+ /// HuggingFace Url:
+ /// https://huggingface.co/Qwen/Qwen1.5-72B-Chat
+ ///
+ Qwen15Chat72B = 25,
+
+ ///
+ /// Name: Qwen 1.5 Chat (7B)
+ /// Organization: Qwen
+ /// Context Length: 32768
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a
+ /// large amount of data. In comparison with the previous released Qwen.
+ /// HuggingFace Url:
+ /// https://huggingface.co/Qwen/Qwen1.5-7B-Chat
+ ///
+ Qwen15Chat7B = 27,
+
+ ///
+ /// Name: ReMM SLERP L2 (13B)
+ /// Organization: Undi95
+ /// Context Length: 4096
+ /// Prompt Cost: $0.3/MTok
+ /// Completion Cost: $0.3/MTok
+ /// Description: Re:MythoMax (ReMM) is a recreation trial of the original MythoMax-L2-B13 with updated models. This
+ /// merge use SLERP [TESTING] to merge ReML and Huginn v1.2.
+ /// HuggingFace Url:
+ /// https://huggingface.co/Undi95/ReMM-SLERP-L2-13B
+ ///
+ RemmSlerpL213B = 30,
+
+ ///
+ /// Name: Toppy M (7B)
+ /// Organization: Undi95
+ /// Context Length: 4096
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: A merge of models built by Undi95 with the new task_arithmetic merge method from mergekit.
+ /// HuggingFace Url: https://huggingface.co/Undi95/Toppy-M-7B
+ ///
+ ToppyM7B = 31,
+
+ ///
+ /// Name: WizardCoder v1.0 (15B)
+ /// Organization: WizardLM
+ /// Context Length: 8192
+ /// Prompt Cost: $0.3/MTok
+ /// Completion Cost: $0.3/MTok
+ /// Description: This model empowers Code LLMs with complex instruction fine-tuning, by adapting the Evol-Instruct
+ /// method to the domain of code.
+ /// HuggingFace Url:
+ /// https://huggingface.co/WizardLM/WizardCoder-15B-V1.0
+ ///
+ WizardcoderV1015B = 33,
+
+ ///
+ /// Name: WizardCoder Python v1.0 (34B)
+ /// Organization: WizardLM
+ /// Context Length: 8192
+ /// Prompt Cost: $0.8/MTok
+ /// Completion Cost: $0.8/MTok
+ /// Description: This model empowers Code LLMs with complex instruction fine-tuning, by adapting the Evol-Instruct
+ /// method to the domain of code.
+ /// HuggingFace Url:
+ /// https://huggingface.co/WizardLM/WizardCoder-Python-34B-V1.0
+ ///
+ WizardcoderPythonV1034B = 34,
+
+ ///
+ /// Name: WizardLM v1.2 (13B)
+ /// Organization: WizardLM
+ /// Context Length: 4096
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: This model achieves a substantial and comprehensive improvement on coding, mathematical reasoning and
+ /// open-domain conversation capacities
+ /// HuggingFace Url:
+ /// https://huggingface.co/WizardLM/WizardLM-13B-V1.2
+ ///
+ WizardlmV1213B = 35,
+
+ ///
+ /// Name: OLMo Instruct (7B)
+ /// Organization: AllenAI
+ /// Context Length: 2048
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: The OLMo models are trained on the Dolma dataset
+ /// HuggingFace Url:
+ /// https://huggingface.co/allenai/OLMo-7B-Instruct
+ ///
+ OlmoInstruct7B = 36,
+
+ ///
+ /// Name: Code Llama Instruct (13B)
+ /// Organization: Meta
+ /// Context Length: 16384
+ /// Prompt Cost: $0.22/MTok
+ /// Completion Cost: $0.22/MTok
+ /// Description: Code Llama is a family of large language models for code based on Llama 2 providing infilling
+ /// capabilities, support for large input contexts, and zero-shot instruction following ability for programming tasks.
+ ///
+ /// HuggingFace Url:
+ /// https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf
+ ///
+ CodeLlamaInstruct13B = 40,
+
+ ///
+ /// Name: Code Llama Python (13B)
+ /// Organization: Meta
+ /// Context Length: 16384
+ /// Prompt Cost: $0.22/MTok
+ /// Completion Cost: $0.22/MTok
+ /// Description: Code Llama is a family of large language models for code based on Llama 2 providing infilling
+ /// capabilities, support for large input contexts, and zero-shot instruction following ability for programming tasks.
+ ///
+ /// HuggingFace Url:
+ /// https://huggingface.co/codellama/CodeLlama-13b-Python-hf
+ ///
+ CodeLlamaPython13B = 41,
+
+ ///
+ /// Name: Code Llama Instruct (34B)
+ /// Organization: Meta
+ /// Context Length: 16384
+ /// Prompt Cost: $0.776/MTok
+ /// Completion Cost: $0.776/MTok
+ /// Description: Code Llama is a family of large language models for code based on Llama 2 providing infilling
+ /// capabilities, support for large input contexts, and zero-shot instruction following ability for programming tasks.
+ ///
+ /// HuggingFace Url:
+ /// https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf
+ ///
+ CodeLlamaInstruct34B = 42,
+
+ ///
+ /// Name: Code Llama Python (34B)
+ /// Organization: Meta
+ /// Context Length: 16384
+ /// Prompt Cost: $0.776/MTok
+ /// Completion Cost: $0.776/MTok
+ /// Description: Code Llama is a family of large language models for code based on Llama 2 providing infilling
+ /// capabilities, support for large input contexts, and zero-shot instruction following ability for programming tasks.
+ ///
+ /// HuggingFace Url:
+ /// https://huggingface.co/codellama/CodeLlama-34b-Python-hf
+ ///
+ CodeLlamaPython34B = 43,
+
+ ///
+ /// Name: Code Llama Instruct (70B)
+ /// Organization: Meta
+ /// Context Length: 4096
+ /// Prompt Cost: $0.9/MTok
+ /// Completion Cost: $0.9/MTok
+ /// Description: Code Llama is a family of large language models for code based on Llama 2 providing infilling
+ /// capabilities, support for large input contexts, and zero-shot instruction following ability for programming tasks.
+ ///
+ /// HuggingFace Url:
+ /// https://huggingface.co/codellama/CodeLlama-70b-Instruct-hf
+ ///
+ CodeLlamaInstruct70B = 44,
+
+ ///
+ /// Name: Code Llama Python (70B)
+ /// Organization: Meta
+ /// Context Length: 4096
+ /// Prompt Cost: $0.9/MTok
+ /// Completion Cost: $0.9/MTok
+ /// Description: Code Llama is a family of large language models for code based on Llama 2 providing infilling
+ /// capabilities, support for large input contexts, and zero-shot instruction following ability for programming tasks.
+ ///
+ /// HuggingFace Url:
+ /// https://huggingface.co/codellama/CodeLlama-70b-Python-hf
+ ///
+ CodeLlamaPython70B = 45,
+
+ ///
+ /// Name: Code Llama (70B)
+ /// Organization: Meta
+ /// Context Length: 16384
+ /// Prompt Cost: $0.9/MTok
+ /// Completion Cost: $0.9/MTok
+ /// Description: Code Llama is a family of large language models for code based on Llama 2 providing infilling
+ /// capabilities, support for large input contexts, and zero-shot instruction following ability for programming tasks.
+ ///
+ /// HuggingFace Url:
+ /// https://huggingface.co/codellama/CodeLlama-70b-hf
+ ///
+ CodeLlama70B = 46,
+
+ ///
+ /// Name: Code Llama Instruct (7B)
+ /// Organization: Meta
+ /// Context Length: 16384
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: Code Llama is a family of large language models for code based on Llama 2 providing infilling
+ /// capabilities, support for large input contexts, and zero-shot instruction following ability for programming tasks.
+ ///
+ /// HuggingFace Url:
+ /// https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf
+ ///
+ CodeLlamaInstruct7B = 47,
+
+ ///
+ /// Name: Code Llama Python (7B)
+ /// Organization: Meta
+ /// Context Length: 16384
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: Code Llama is a family of large language models for code based on Llama 2 providing infilling
+ /// capabilities, support for large input contexts, and zero-shot instruction following ability for programming tasks.
+ ///
+ /// HuggingFace Url:
+ /// https://huggingface.co/codellama/CodeLlama-7b-Python-hf
+ ///
+ CodeLlamaPython7B = 48,
+
+ ///
+ /// Name: Dolphin 2.5 Mixtral 8x7b
+ /// Organization: cognitivecomputations
+ /// Context Length: 32768
+ /// Prompt Cost: $0.6/MTok
+ /// Completion Cost: $0.6/MTok
+ /// Description: This Dolphin is really good at coding, I trained with a lot of coding data. It is very obedient but it
+ /// is not DPO tuned - so you still might need to encourage it in the system prompt as I show in the below examples.
+ ///
+ /// HuggingFace Url:
+ /// https://huggingface.co/cognitivecomputations/dolphin-2.5-mixtral-8x7b
+ ///
+ Dolphin25Mixtral8X7B = 49,
+
+ ///
+ /// Name: Deepseek Coder Instruct (33B)
+ /// Organization: DeepSeek
+ /// Context Length: 16384
+ /// Prompt Cost: $0.8/MTok
+ /// Completion Cost: $0.8/MTok
+ /// Description: Deepseek Coder is composed of a series of code language models, each trained from scratch on 2T
+ /// tokens, with a composition of 87% code and 13% natural language in both English and Chinese.
+ /// HuggingFace Url:
+ /// https://huggingface.co/deepseek-ai/deepseek-coder-33b-instruct
+ ///
+ DeepseekCoderInstruct33B = 50,
+
+ ///
+ /// Name: DeepSeek LLM Chat (67B)
+ /// Organization: DeepSeek
+ /// Context Length: 4096
+ /// Prompt Cost: $0.9/MTok
+ /// Completion Cost: $0.9/MTok
+ /// Description: trained from scratch on a vast dataset of 2 trillion tokens in both English and Chinese
+ /// HuggingFace Url:
+ /// https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat
+ ///
+ DeepseekLlmChat67B = 51,
+
+ ///
+ /// Name: Platypus2 Instruct (70B)
+ /// Organization: garage-bAInd
+ /// Context Length: 4096
+ /// Prompt Cost: $0.9/MTok
+ /// Completion Cost: $0.9/MTok
+ /// Description: An instruction fine-tuned LLaMA-2 (70B) model by merging Platypus2 (70B) by garage-bAInd and LLaMA-2
+ /// Instruct v2 (70B) by upstage.
+ /// HuggingFace Url:
+ /// https://huggingface.co/garage-bAInd/Platypus2-70B-instruct
+ ///
+ Platypus2Instruct70B = 52,
+
+ ///
+ /// Name: Gemma Instruct (2B)
+ /// Organization: Google
+ /// Context Length: 8192
+ /// Prompt Cost: $0.1/MTok
+ /// Completion Cost: $0.1/MTok
+ /// Description: Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same
+ /// research and technology used to create the Gemini models.
+ /// HuggingFace Url: https://huggingface.co/google/gemma-2b-it
+ ///
+ GemmaInstruct2B = 53,
+
+ ///
+ /// Name: Gemma Instruct (7B)
+ /// Organization: Google
+ /// Context Length: 8192
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same
+ /// research and technology used to create the Gemini models.
+ /// HuggingFace Url: https://huggingface.co/google/gemma-7b-it
+ ///
+ GemmaInstruct7B = 55,
+
+ ///
+ /// Name: Vicuna v1.5 (13B)
+ /// Organization: LM Sys
+ /// Context Length: 4096
+ /// Prompt Cost: $0.3/MTok
+ /// Completion Cost: $0.3/MTok
+ /// Description: Vicuna is a chat assistant trained by fine-tuning Llama 2 on user-shared conversations collected from
+ /// ShareGPT.
+ /// HuggingFace Url:
+ /// https://huggingface.co/lmsys/vicuna-13b-v1.5
+ ///
+ VicunaV1513B = 57,
+
+ ///
+ /// Name: Vicuna v1.5 (7B)
+ /// Organization: LM Sys
+ /// Context Length: 4096
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: Vicuna is a chat assistant trained by fine-tuning Llama 2 on user-shared conversations collected from
+ /// ShareGPT.
+ /// HuggingFace Url:
+ /// https://huggingface.co/lmsys/vicuna-7b-v1.5
+ ///
+ VicunaV157B = 58,
+
+ ///
+ /// Name: LLaMA-2 Chat (13B)
+ /// Organization: Meta
+ /// Context Length: 4096
+ /// Prompt Cost: $0.22/MTok
+ /// Completion Cost: $0.22/MTok
+ /// Description: Llama 2-chat leverages publicly available instruction datasets and over 1 million human annotations.
+ /// Available in three sizes: 7B, 13B and 70B parameters
+ /// HuggingFace Url:
+ /// https://huggingface.co/meta-llama/Llama-2-13b-chat-hf
+ ///
+ Llama2Chat13B = 59,
+
+ ///
+ /// Name: LLaMA-2 Chat (70B)
+ /// Organization: Meta
+ /// Context Length: 4096
+ /// Prompt Cost: $0.9/MTok
+ /// Completion Cost: $0.9/MTok
+ /// Description: Llama 2-chat leverages publicly available instruction datasets and over 1 million human annotations.
+ /// Available in three sizes: 7B, 13B and 70B parameters
+ /// HuggingFace Url:
+ /// https://huggingface.co/meta-llama/Llama-2-70b-chat-hf
+ ///
+ Llama2Chat70B = 61,
+
+ ///
+ /// Name: LLaMA-2 Chat (7B)
+ /// Organization: Meta
+ /// Context Length: 4096
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: Llama 2-chat leverages publicly available instruction datasets and over 1 million human annotations.
+ /// Available in three sizes: 7B, 13B and 70B parameters
+ /// HuggingFace Url:
+ /// https://huggingface.co/meta-llama/Llama-2-7b-chat-hf
+ ///
+ Llama2Chat7B = 63,
+
+ ///
+ /// Name: Mistral (7B) Instruct
+ /// Organization: mistralai
+ /// Context Length: 4096
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: instruct fine-tuned version of Mistral-7B-v0.1
+ /// HuggingFace Url:
+ /// https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1
+ ///
+ Mistral7BInstruct = 66,
+
+ ///
+ /// Name: Mistral (7B) Instruct v0.2
+ /// Organization: mistralai
+ /// Context Length: 32768
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: The Mistral-7B-Instruct-v0.2 Large Language Model (LLM) is an improved instruct fine-tuned version of
+ /// Mistral-7B-Instruct-v0.1.
+ /// HuggingFace Url:
+ /// https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2
+ ///
+ Mistral7BInstructV02 = 67,
+
+ ///
+ /// Name: Mixtral-8x7B Instruct v0.1
+ /// Organization: mistralai
+ /// Context Length: 32768
+ /// Prompt Cost: $0.6/MTok
+ /// Completion Cost: $0.6/MTok
+ /// Description: The Mixtral-8x7B Large Language Model (LLM) is a pretrained generative Sparse Mixture of Experts.
+ ///
+ /// HuggingFace Url:
+ /// https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
+ ///
+ Mixtral8X7BInstructV01 = 69,
+
+ ///
+ /// Name: OpenChat 3.5
+ /// Organization: OpenChat
+ /// Context Length: 8192
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: OpenChat 3.5 was trained with C-RLFT on a collection of publicly available high-quality
+ /// instruction data, with a custom processing pipeline.
+ /// HuggingFace Url:
+ /// https://huggingface.co/openchat/openchat-3.5-1210
+ ///
+ OpenChat35 = 71,
+
+ ///
+ /// Name: Snorkel Mistral PairRM DPO (7B)
+ /// Organization: Snorkel AI
+ /// Context Length: 32768
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: A state-of-the-art model by Snorkel AI, DPO fine-tuned on Mistral-7B
+ /// HuggingFace Url:
+ /// https://huggingface.co/snorkelai/Snorkel-Mistral-PairRM-DPO
+ ///
+ SnorkelMistralPairrmDpo7B = 75,
+
+ ///
+ /// Name: OpenHermes-2-Mistral (7B)
+ /// Organization: teknium
+ /// Context Length: 8192
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: State of the art Mistral Fine-tuned on extensive public datasets
+ /// HuggingFace Url:
+ /// https://huggingface.co/teknium/OpenHermes-2-Mistral-7B
+ ///
+ OpenHermes2Mistral7B = 78,
+
+ ///
+ /// Name: OpenHermes-2.5-Mistral (7B)
+ /// Organization: teknium
+ /// Context Length: 8192
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: Continuation of OpenHermes 2 Mistral model trained on additional code datasets
+ /// HuggingFace Url:
+ /// https://huggingface.co/teknium/OpenHermes-2p5-Mistral-7B
+ ///
+ OpenHermes25Mistral7B = 79,
+
+ ///
+ /// Name: LLaMA-2-7B-32K-Instruct (7B)
+ /// Organization: Together
+ /// Context Length: 32768
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: Extending LLaMA-2 to 32K context, built with Meta's Position Interpolation and Together AI's data
+ /// recipe and system optimizations, instruction tuned by Together
+ /// HuggingFace Url:
+ /// https://huggingface.co/togethercomputer/Llama-2-7B-32K-Instruct
+ ///
+ Llama27B32KInstruct7B = 82,
+
+ ///
+ /// Name: RedPajama-INCITE Chat (7B)
+ /// Organization: Together
+ /// Context Length: 2048
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: Chat model fine-tuned using data from Dolly 2.0 and Open Assistant over the
+ /// RedPajama-INCITE-Base-7B-v1 base model.
+ /// HuggingFace Url:
+ /// https://huggingface.co/togethercomputer/RedPajama-INCITE-7B-Chat
+ ///
+ RedpajamaInciteChat7B = 84,
+
+ ///
+ /// Name: RedPajama-INCITE Chat (3B)
+ /// Organization: Together
+ /// Context Length: 2048
+ /// Prompt Cost: $0.1/MTok
+ /// Completion Cost: $0.1/MTok
+ /// Description: Chat model fine-tuned using data from Dolly 2.0 and Open Assistant over the
+ /// RedPajama-INCITE-Base-3B-v1 base model.
+ /// HuggingFace Url:
+ /// https://huggingface.co/togethercomputer/RedPajama-INCITE-Chat-3B-v1
+ ///
+ RedpajamaInciteChat3B = 87,
+
+ ///
+ /// Name: StripedHyena Nous (7B)
+ /// Organization: Together
+ /// Context Length: 32768
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: A hybrid architecture composed of multi-head, grouped-query attention and gated convolutions arranged
+ /// in Hyena blocks, different from traditional decoder-only Transformers
+ /// HuggingFace Url:
+ /// https://huggingface.co/togethercomputer/StripedHyena-Nous-7B
+ ///
+ StripedhyenaNous7B = 90,
+
+ ///
+ /// Name: Alpaca (7B)
+ /// Organization: Stanford
+ /// Context Length: 2048
+ /// Prompt Cost: $0.2/MTok
+ /// Completion Cost: $0.2/MTok
+ /// Description: Fine-tuned from the LLaMA 7B model on 52K instruction-following demonstrations.
+ /// HuggingFace Url:
+ /// https://huggingface.co/togethercomputer/alpaca-7b
+ ///
+ Alpaca7B = 91,
+
+ ///
+ /// Name: Upstage SOLAR Instruct v1 (11B)
+ /// Organization: upstage
+ /// Context Length: 4096
+ /// Prompt Cost: $0.3/MTok
+ /// Completion Cost: $0.3/MTok
+ /// Description: Built on the Llama2 architecture, SOLAR-10.7B incorporates the innovative Upstage Depth Up-Scaling
+ ///
+ /// HuggingFace Url:
+ /// https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0
+ ///
+ UpstageSolarInstructV111B = 97,
+
+ ///
+ /// Name: 01-ai Yi Chat (34B)
+ /// Organization: 01.AI
+ /// Context Length: 4096
+ /// Prompt Cost: $0.8/MTok
+ /// Completion Cost: $0.8/MTok
+ /// Description: The Yi series models are large language models trained from scratch by developers at 01.AI
+ /// HuggingFace Url:
+ /// https://huggingface.co/zero-one-ai/Yi-34B-Chat
+ ///
+ _01AiYiChat34B = 100
+}
\ No newline at end of file
diff --git a/src/Providers/TogetherAI/src/TogetherAiModelProvider.cs b/src/Providers/TogetherAI/src/TogetherAiModelProvider.cs
new file mode 100644
index 00000000..4d210642
--- /dev/null
+++ b/src/Providers/TogetherAI/src/TogetherAiModelProvider.cs
@@ -0,0 +1,275 @@
+using LangChain.Providers.TogetherAi;
+using OpenAI.Constants;
+
// NOTE(review): namespace was LangChain.Providers.OpenRouter — a copy/paste leftover from the
// OpenRouter provider. This file lives under src/Providers/TogetherAI and the rest of the
// TogetherAi code (provider, models, tests) uses LangChain.Providers.TogetherAi.
namespace LangChain.Providers.TogetherAi;

/// <summary>
/// Maps every known <see cref="TogetherAiModelIds"/> value to its Together.Ai chat-model
/// metadata: the API model id, context length, and prompt/completion prices.
/// </summary>
public static class TogetherAiModelProvider
{
    /// <summary>
    /// Lookup table of model metadata. Prices are USD per token
    /// (e.g. 2.0000000000000002E-07 corresponds to $0.2/MTok).
    /// </summary>
    private static IReadOnlyDictionary<TogetherAiModelIds, ChatModels> Models { get; } =
        new Dictionary<TogetherAiModelIds, ChatModels>
        {
            { TogetherAiModelIds.ChronosHermes13B, new ChatModels("Austism/chronos-hermes-13b", 2048, 3E-07, 3E-07) },
            { TogetherAiModelIds.MythomaxL213B, new ChatModels("Gryphe/MythoMax-L2-13b", 4096, 3E-07, 3E-07) },
            { TogetherAiModelIds.NousCapybaraV197B, new ChatModels("NousResearch/Nous-Capybara-7B-V1p9", 8192, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.NousHermes2MistralDpo7B, new ChatModels("NousResearch/Nous-Hermes-2-Mistral-7B-DPO", 32768, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.NousHermes2Mixtral8X7BDpo, new ChatModels("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 32768, 6E-07, 6E-07) },
            { TogetherAiModelIds.NousHermes2Mixtral8X7BSft, new ChatModels("NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT", 32768, 6E-07, 6E-07) },
            { TogetherAiModelIds.NousHermes2Yi34B, new ChatModels("NousResearch/Nous-Hermes-2-Yi-34B", 4096, 8.000000000000001E-07, 8.000000000000001E-07) },
            { TogetherAiModelIds.NousHermesLlama213B, new ChatModels("NousResearch/Nous-Hermes-Llama2-13b", 4096, 3E-07, 3E-07) },
            { TogetherAiModelIds.NousHermesLlama27B, new ChatModels("NousResearch/Nous-Hermes-llama-2-7b", 4096, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.OpenOrcaMistral7B8K, new ChatModels("Open-Orca/Mistral-7B-OpenOrca", 8192, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.PhindCodeLlamaV234B, new ChatModels("Phind/Phind-CodeLlama-34B-v2", 16384, 8.000000000000001E-07, 8.000000000000001E-07) },
            { TogetherAiModelIds.Qwen15Chat05B, new ChatModels("Qwen/Qwen1.5-0.5B-Chat", 32768, 1.0000000000000001E-07, 1.0000000000000001E-07) },
            { TogetherAiModelIds.Qwen15Chat18B, new ChatModels("Qwen/Qwen1.5-1.8B-Chat", 32768, 1.0000000000000001E-07, 1.0000000000000001E-07) },
            { TogetherAiModelIds.Qwen15Chat14B, new ChatModels("Qwen/Qwen1.5-14B-Chat", 32768, 3E-07, 3E-07) },
            { TogetherAiModelIds.Qwen15Chat32B, new ChatModels("Qwen/Qwen1.5-32B-Chat", 32768, 8.000000000000001E-07, 8.000000000000001E-07) },
            { TogetherAiModelIds.Qwen15Chat4B, new ChatModels("Qwen/Qwen1.5-4B-Chat", 32768, 1.0000000000000001E-07, 1.0000000000000001E-07) },
            { TogetherAiModelIds.Qwen15Chat72B, new ChatModels("Qwen/Qwen1.5-72B-Chat", 32768, 9.000000000000001E-07, 9.000000000000001E-07) },
            { TogetherAiModelIds.Qwen15Chat7B, new ChatModels("Qwen/Qwen1.5-7B-Chat", 32768, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.RemmSlerpL213B, new ChatModels("Undi95/ReMM-SLERP-L2-13B", 4096, 3E-07, 3E-07) },
            { TogetherAiModelIds.ToppyM7B, new ChatModels("Undi95/Toppy-M-7B", 4096, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.WizardcoderV1015B, new ChatModels("WizardLM/WizardCoder-15B-V1.0", 8192, 3E-07, 3E-07) },
            { TogetherAiModelIds.WizardcoderPythonV1034B, new ChatModels("WizardLM/WizardCoder-Python-34B-V1.0", 8192, 8.000000000000001E-07, 8.000000000000001E-07) },
            { TogetherAiModelIds.WizardlmV1213B, new ChatModels("WizardLM/WizardLM-13B-V1.2", 4096, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.OlmoInstruct7B, new ChatModels("allenai/OLMo-7B-Instruct", 2048, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.CodeLlamaInstruct13B, new ChatModels("codellama/CodeLlama-13b-Instruct-hf", 16384, 2.2E-07, 2.2E-07) },
            { TogetherAiModelIds.CodeLlamaPython13B, new ChatModels("codellama/CodeLlama-13b-Python-hf", 16384, 2.2E-07, 2.2E-07) },
            { TogetherAiModelIds.CodeLlamaInstruct34B, new ChatModels("codellama/CodeLlama-34b-Instruct-hf", 16384, 7.760000000000001E-07, 7.760000000000001E-07) },
            { TogetherAiModelIds.CodeLlamaPython34B, new ChatModels("codellama/CodeLlama-34b-Python-hf", 16384, 7.760000000000001E-07, 7.760000000000001E-07) },
            { TogetherAiModelIds.CodeLlamaInstruct70B, new ChatModels("codellama/CodeLlama-70b-Instruct-hf", 4096, 9.000000000000001E-07, 9.000000000000001E-07) },
            { TogetherAiModelIds.CodeLlamaPython70B, new ChatModels("codellama/CodeLlama-70b-Python-hf", 4096, 9.000000000000001E-07, 9.000000000000001E-07) },
            { TogetherAiModelIds.CodeLlama70B, new ChatModels("codellama/CodeLlama-70b-hf", 16384, 9.000000000000001E-07, 9.000000000000001E-07) },
            { TogetherAiModelIds.CodeLlamaInstruct7B, new ChatModels("codellama/CodeLlama-7b-Instruct-hf", 16384, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.CodeLlamaPython7B, new ChatModels("codellama/CodeLlama-7b-Python-hf", 16384, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.Dolphin25Mixtral8X7B, new ChatModels("cognitivecomputations/dolphin-2.5-mixtral-8x7b", 32768, 6E-07, 6E-07) },
            { TogetherAiModelIds.DeepseekCoderInstruct33B, new ChatModels("deepseek-ai/deepseek-coder-33b-instruct", 16384, 8.000000000000001E-07, 8.000000000000001E-07) },
            { TogetherAiModelIds.DeepseekLlmChat67B, new ChatModels("deepseek-ai/deepseek-llm-67b-chat", 4096, 9.000000000000001E-07, 9.000000000000001E-07) },
            { TogetherAiModelIds.Platypus2Instruct70B, new ChatModels("garage-bAInd/Platypus2-70B-instruct", 4096, 9.000000000000001E-07, 9.000000000000001E-07) },
            { TogetherAiModelIds.GemmaInstruct2B, new ChatModels("google/gemma-2b-it", 8192, 1.0000000000000001E-07, 1.0000000000000001E-07) },
            { TogetherAiModelIds.GemmaInstruct7B, new ChatModels("google/gemma-7b-it", 8192, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.VicunaV1513B, new ChatModels("lmsys/vicuna-13b-v1.5", 4096, 3E-07, 3E-07) },
            { TogetherAiModelIds.VicunaV157B, new ChatModels("lmsys/vicuna-7b-v1.5", 4096, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.Llama2Chat13B, new ChatModels("meta-llama/Llama-2-13b-chat-hf", 4096, 2.2E-07, 2.2E-07) },
            { TogetherAiModelIds.Llama2Chat70B, new ChatModels("meta-llama/Llama-2-70b-chat-hf", 4096, 9.000000000000001E-07, 9.000000000000001E-07) },
            { TogetherAiModelIds.Llama2Chat7B, new ChatModels("meta-llama/Llama-2-7b-chat-hf", 4096, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.Mistral7BInstruct, new ChatModels("mistralai/Mistral-7B-Instruct-v0.1", 4096, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.Mistral7BInstructV02, new ChatModels("mistralai/Mistral-7B-Instruct-v0.2", 32768, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.Mixtral8X7BInstructV01, new ChatModels("mistralai/Mixtral-8x7B-Instruct-v0.1", 32768, 6E-07, 6E-07) },
            { TogetherAiModelIds.OpenChat35, new ChatModels("openchat/openchat-3.5-1210", 8192, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.SnorkelMistralPairrmDpo7B, new ChatModels("snorkelai/Snorkel-Mistral-PairRM-DPO", 32768, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.OpenHermes2Mistral7B, new ChatModels("teknium/OpenHermes-2-Mistral-7B", 8192, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.OpenHermes25Mistral7B, new ChatModels("teknium/OpenHermes-2p5-Mistral-7B", 8192, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.Llama27B32KInstruct7B, new ChatModels("togethercomputer/Llama-2-7B-32K-Instruct", 32768, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.RedpajamaInciteChat7B, new ChatModels("togethercomputer/RedPajama-INCITE-7B-Chat", 2048, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.RedpajamaInciteChat3B, new ChatModels("togethercomputer/RedPajama-INCITE-Chat-3B-v1", 2048, 1.0000000000000001E-07, 1.0000000000000001E-07) },
            { TogetherAiModelIds.StripedhyenaNous7B, new ChatModels("togethercomputer/StripedHyena-Nous-7B", 32768, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.Alpaca7B, new ChatModels("togethercomputer/alpaca-7b", 2048, 2.0000000000000002E-07, 2.0000000000000002E-07) },
            { TogetherAiModelIds.UpstageSolarInstructV111B, new ChatModels("upstage/SOLAR-10.7B-Instruct-v1.0", 4096, 3E-07, 3E-07) },
            { TogetherAiModelIds._01AiYiChat34B, new ChatModels("zero-one-ai/Yi-34B-Chat", 4096, 8.000000000000001E-07, 8.000000000000001E-07) },
        };

    /// <summary>
    /// Returns the <see cref="ChatModels"/> metadata registered for the given model id.
    /// </summary>
    /// <param name="modelId">The Together.Ai model identifier to look up.</param>
    /// <returns>The model's API id, context length and pricing.</returns>
    /// <exception cref="ArgumentException">
    /// Thrown when <paramref name="modelId"/> has no registered metadata.
    /// </exception>
    public static ChatModels GetModelById(TogetherAiModelIds modelId)
    {
        // Single dictionary lookup instead of the original ContainsKey + indexer double lookup.
        if (Models.TryGetValue(modelId, out var model))
            return model;

        throw new ArgumentException($"Invalid Together.Ai Model {modelId}", nameof(modelId));
    }
}
\ No newline at end of file
diff --git a/src/Providers/TogetherAI/src/TogetherAiProvider.cs b/src/Providers/TogetherAI/src/TogetherAiProvider.cs
new file mode 100644
index 00000000..656fa6bf
--- /dev/null
+++ b/src/Providers/TogetherAI/src/TogetherAiProvider.cs
@@ -0,0 +1,14 @@
+using LangChain.Providers.OpenAI;
+
+namespace LangChain.Providers.TogetherAi;
+
/// <summary>
/// Provider for the Together.Ai OpenAI-compatible chat API.
/// Reuses the OpenAI wire protocol, pointed at the api.together.xyz endpoint.
/// </summary>
public class TogetherAiProvider : OpenAiProvider
{
    /// <summary>
    /// Creates a provider from a full Together.Ai configuration.
    /// </summary>
    public TogetherAiProvider(TogetherAiConfiguration configuration) : base(configuration)
    {
    }

    /// <summary>
    /// Creates a provider for the default api.together.xyz endpoint,
    /// authenticated with the given API key.
    /// </summary>
    public TogetherAiProvider(string apiKey) : base(apiKey, "api.together.xyz")
    {
    }
}
\ No newline at end of file
diff --git a/src/Providers/TogetherAI/tests/LangChain.Providers.TogetherAi.Tests.csproj b/src/Providers/TogetherAI/tests/LangChain.Providers.TogetherAi.Tests.csproj
new file mode 100644
index 00000000..d5ff1c3e
--- /dev/null
+++ b/src/Providers/TogetherAI/tests/LangChain.Providers.TogetherAi.Tests.csproj
@@ -0,0 +1,11 @@
+
+
+
+ net8.0
+
+
+
+
+
+
+
diff --git a/src/Providers/TogetherAI/tests/Tests.cs b/src/Providers/TogetherAI/tests/Tests.cs
new file mode 100644
index 00000000..eacad7eb
--- /dev/null
+++ b/src/Providers/TogetherAI/tests/Tests.cs
@@ -0,0 +1,30 @@
+using LangChain.Providers.TogetherAi.Predefined;
+
+namespace LangChain.Providers.TogetherAi.Tests;
+
[TestFixture]
[Explicit] // Hits the live Together.Ai API; requires a real key, so excluded from normal CI runs.
public class GeneralTests
{
    // Smoke test for tool calling: registers the WeatherService tools on the Mixtral model,
    // asks a weather question, and prints the resulting conversation history.
    [Test]
    public async Task GetWeather()
    {
        // API key comes from the user-scoped TogetherAi_Api_Key environment variable.
        var apiKey =
            Environment.GetEnvironmentVariable("TogetherAi_Api_Key", EnvironmentVariableTarget.User) ??
            throw new InvalidOperationException("TogetherAi_Api_Key environment variable is not found.");

        var model = new Mixtral8X7BInstructV01Model(new TogetherAiProvider(apiKey));

        // Expose the weather functions as tools the model can call.
        var service = new WeatherService();
        model.AddGlobalTools(service.AsTools(), service.AsCalls());

        var response = await model.GenerateAsync(
            new[]
            {
                "You are a helpful weather assistant.".AsSystemMessage(),
                "What's the weather like today in Dubai, UAE?".AsHumanMessage()
            });

        // No assertion: this is an explicit, manual smoke test — inspect the printed history.
        Console.WriteLine(response.Messages.AsHistory());
    }
}
\ No newline at end of file
diff --git a/src/Providers/TogetherAI/tests/TogetherAiTests.cs b/src/Providers/TogetherAI/tests/TogetherAiTests.cs
new file mode 100644
index 00000000..34684853
--- /dev/null
+++ b/src/Providers/TogetherAI/tests/TogetherAiTests.cs
@@ -0,0 +1,40 @@
+using LangChain.Providers.TogetherAi.Predefined;
+
+namespace LangChain.Providers.TogetherAi.Tests;
+
[TestFixture]
[Explicit] // Hits the live Together.Ai API; requires a real key, so excluded from normal CI runs.
public class TogetherAiTests
{
    // Generates text through a strongly-typed predefined model wrapper.
    [Test]
    public async Task ShouldGenerateFine_WithPredefinedModel()
    {
        // API key comes from the user-scoped TogetherAi_Api_Key environment variable.
        var apiKey =
            Environment.GetEnvironmentVariable("TogetherAi_Api_Key", EnvironmentVariableTarget.User) ??
            throw new InvalidOperationException("TogetherAi_Api_Key is not set");

        var model = new Mixtral8X7BInstructV01Model(new TogetherAiProvider(apiKey));

        var result = await model.GenerateAsync("Write a Poem".AsHumanMessage());

        // Only check that the model produced a non-empty reply; content itself is nondeterministic.
        result.Messages.Count.Should().BeGreaterThan(0);
        result.Messages.Last().Content.Should().NotBeNullOrEmpty();
        Console.WriteLine(result.LastMessageContent);
    }

    // Same generation path, but selecting the model via the TogetherAiModelIds enum.
    [Test]
    public async Task ShouldGenerateFine_With_Enum_Model()
    {
        // API key comes from the user-scoped TogetherAi_Api_Key environment variable.
        var apiKey =
            Environment.GetEnvironmentVariable("TogetherAi_Api_Key", EnvironmentVariableTarget.User) ??
            throw new InvalidOperationException("TogetherAi_Api_Key is not set");

        var model = new TogetherAiModel(new TogetherAiProvider(apiKey), TogetherAiModelIds.OpenHermes25Mistral7B);

        var result = await model.GenerateAsync("Write a Poem".AsHumanMessage());

        // Only check that the model produced a non-empty reply; content itself is nondeterministic.
        result.Messages.Count.Should().BeGreaterThan(0);
        result.Messages.Last().Content.Should().NotBeNullOrEmpty();
        Console.WriteLine(result.LastMessageContent);
    }
}
\ No newline at end of file
diff --git a/src/Providers/TogetherAI/tests/WeatherFunctions.cs b/src/Providers/TogetherAI/tests/WeatherFunctions.cs
new file mode 100644
index 00000000..6879a4b1
--- /dev/null
+++ b/src/Providers/TogetherAI/tests/WeatherFunctions.cs
@@ -0,0 +1,61 @@
+using tryAGI.OpenAI;
+using DescriptionAttribute = System.ComponentModel.DescriptionAttribute;
+
+namespace LangChain.Providers.TogetherAi.Tests;
+
/// <summary>
/// Temperature unit used by the weather test tools.
/// </summary>
public enum Unit
{
    Celsius,
    Fahrenheit
}
+
/// <summary>
/// Plain data object returned by the weather test tools.
/// </summary>
public class Weather
{
    /// <summary>City/state string as passed in by the caller.</summary>
    public string Location { get; set; } = string.Empty;

    /// <summary>Temperature value, expressed in <see cref="Unit"/>.</summary>
    public double Temperature { get; set; }

    /// <summary>Unit that <see cref="Temperature"/> is expressed in.</summary>
    public Unit Unit { get; set; }

    /// <summary>Short human-readable conditions summary (e.g. "Sunny").</summary>
    public string Description { get; set; } = string.Empty;
}
+
/// <summary>
/// Tool-call contract exposed to the model under test; the [OpenAiFunctions] source
/// generator derives the OpenAI function schemas from these signatures.
/// </summary>
[OpenAiFunctions]
public interface IWeatherFunctions
{
    [Description("Get the current weather in a given location")]
    public Weather GetCurrentWeather(
        [Description("The city and state, e.g. San Francisco, CA")]
        string location,
        Unit unit = Unit.Celsius);

    // Async variant must return Task<Weather>, not the non-generic Task: the implementation
    // produces a Weather via Task.FromResult, and a bare Task would silently discard it.
    [Description("Get the current weather in a given location")]
    public Task<Weather> GetCurrentWeatherAsync(
        [Description("The city and state, e.g. San Francisco, CA")]
        string location,
        Unit unit = Unit.Celsius,
        CancellationToken cancellationToken = default);
}
+
/// <summary>
/// Deterministic fake weather backend used by the tool-calling tests: always reports
/// 22 degrees and "Sunny" for whatever location is requested.
/// </summary>
public class WeatherService : IWeatherFunctions
{
    /// <inheritdoc />
    public Weather GetCurrentWeather(string location, Unit unit = Unit.Celsius)
    {
        // Canned response — the tests verify the tool-call plumbing, not real weather data.
        return new Weather
        {
            Location = location,
            Temperature = 22.0,
            Unit = unit,
            Description = "Sunny"
        };
    }

    /// <inheritdoc />
    public Task<Weather> GetCurrentWeatherAsync(string location, Unit unit = Unit.Celsius,
        CancellationToken cancellationToken = default)
    {
        // No real async work: reuse the sync implementation (instead of duplicating the
        // canned Weather literal) and wrap it in a completed Task<Weather>.
        return Task.FromResult(GetCurrentWeather(location, unit));
    }
}
\ No newline at end of file