diff --git a/README.md b/README.md index 1648873..155e035 100644 --- a/README.md +++ b/README.md @@ -45,7 +45,7 @@ const titanText = createAwsBedrockModelProvider({ }); const response = await titanText.sendRequest({ - prompt: "Brief history of NY Mets:" + $prompt: "Brief history of NY Mets:" // all other options for the specified `api` available here }); @@ -66,7 +66,7 @@ const commandR = createCohereModelProvider({ }); const response = await commandR.sendRequest({ - prompt: "Brief History of NY Mets:", + $prompt: "Brief History of NY Mets:", preamble: "Talk like Jafar from Aladdin", // all other Cohere /generate options available here }); @@ -88,7 +88,7 @@ const gemini = await createVertexAiModelProvider({ }); const response = await gemini.sendRequest({ - prompt: "Brief History of NY Mets:", + $prompt: "Brief History of NY Mets:", // all other Gemini options available here }); @@ -109,7 +109,7 @@ const llama3 = createGroqModelProvider({ }); const response = await llama3.sendRequest({ - prompt: "Brief History of NY Mets:" + $prompt: "Brief History of NY Mets:" // all other OpenAI ChatCompletion options available here (Groq uses the OpenAI ChatCompletion API for all the models it hosts) }); @@ -135,7 +135,7 @@ const gpt2 = createHuggingfaceInferenceModelProvider({ }); const response = await gpt2.sendRequest({ - prompt: "Hello," + $prompt: "Hello," // all other options for the specified `api` available here }); @@ -155,7 +155,7 @@ const llama3 = createLmStudioModelProvider({ }); const response = await llama3.sendRequest({ - prompt: "Brief History of NY Mets:" + $prompt: "Brief History of NY Mets:" // all other OpenAI ChatCompletion options available here (LMStudio uses the OpenAI ChatCompletion API for all the models it hosts) }); @@ -176,7 +176,7 @@ const mistralLarge = createMistralModelProvider({ }); const response = await mistralLarge.sendRequest({ - prompt: "Brief History of NY Mets:" + $prompt: "Brief History of NY Mets:" // all other Mistral ChatCompletion API options available here }); @@ -197,7 +197,7 @@ const gpt = createOpenAiChatModelProvider({ }); const response = await gpt.sendRequest({ - prompt: "Brief History of NY Mets:", + $prompt: "Brief History of NY Mets:", max_tokens: 100, // all other OpenAI ChatCompletion options available here }); diff --git a/examples/few-shot-chat-models.ts b/examples/few-shot-chat-models.ts index 04749d9..6d0864a 100644 --- a/examples/few-shot-chat-models.ts +++ b/examples/few-shot-chat-models.ts @@ -22,7 +22,7 @@ const payload = { assistant: '{ "answer": "The LA Dodgers won the World Series in 2020." 
}', }, ], - prompt: "Brief History of NY Mets:", + $prompt: "Brief History of NY Mets:", }; async function main() { diff --git a/examples/index.ts b/examples/index.ts index 4134b14..3ce8c46 100644 --- a/examples/index.ts +++ b/examples/index.ts @@ -15,7 +15,7 @@ import { } from "@packages/core"; async function main() { - const prompt = "Brief History of NY Mets:"; + const $prompt = "Brief History of NY Mets:"; const gptProvider = createOpenAiChatModelProvider({ modelId: "gpt-4-turbo", @@ -76,7 +76,7 @@ async function main() { provider: gptProvider, params: { system: "talk like jafar from aladdin", - prompt, + $prompt, max_tokens: 50, temperature: 1.0, }, @@ -84,31 +84,31 @@ async function main() { { name: "Titan(AWS)", provider: titanTextProvider, - params: { prompt, maxTokenCount: 50, temperature: 1.0 }, + params: { $prompt, maxTokenCount: 50, temperature: 1.0 }, }, { name: "Cohere-Command(AWS)", provider: cohereCommandProvider, - params: { prompt, max_tokens: 50, temperature: 1.0 }, + params: { $prompt, max_tokens: 50, temperature: 1.0 }, }, { name: "DialoGPT(HF)", provider: hfConvoProvider, params: { - prompt, + $prompt, parameters: { max_new_tokens: 50, temperature: 1.0 }, }, }, { name: "GPT2(HF)", provider: hfTextgenProvider, - params: { prompt, parameters: { max_new_tokens: 50, temperature: 1.0 } }, + params: { $prompt, parameters: { max_new_tokens: 50, temperature: 1.0 } }, }, { name: "LLama3(LM Studio)", provider: lmStudioProvider, params: { - prompt, + $prompt, system: "talk like iago from aladdin", temperature: 1.0, max_tokens: 50, @@ -118,7 +118,7 @@ async function main() { name: "Llama3(AWS)", provider: llama3aws, params: { - prompt, + $prompt, system: "talk like jafar from aladdin", temperature: 1.0, }, @@ -126,18 +126,18 @@ async function main() { { name: "Jurassic2(AWS)", provider: jurassic, - params: { prompt, maxTokens: 50, temperature: 1.0 }, + params: { $prompt, maxTokens: 50, temperature: 1.0 }, }, { name: "Mistral(AWS)", provider: mistral, - params: { prompt, temperature: 1.0 }, + params: { $prompt, temperature: 1.0 }, }, { name: "Lama3-70b(Groq)", provider: groqProvider, params: { - prompt, + $prompt, system: "talk like jafar from aladdin", temperature: 1.0, }, @@ -145,7 +145,7 @@ async function main() { { name: "Cohere-Command(Cohere-API)", provider: cohereProvider, - params: { prompt }, + params: { $prompt }, }, ]; diff --git a/packages/core/src/apis/ai21/__snapshots__/jurassic2.spec.ts.snap b/packages/core/src/apis/ai21/__snapshots__/jurassic2.spec.ts.snap index d20298c..883383d 100644 --- a/packages/core/src/apis/ai21/__snapshots__/jurassic2.spec.ts.snap +++ b/packages/core/src/apis/ai21/__snapshots__/jurassic2.spec.ts.snap @@ -1,5 +1,11 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP +exports[`Ai21Jurassic2Api: $prompt 1`] = ` +{ + "prompt": "mock-prompt", +} +`; + exports[`Ai21Jurassic2Api: all options 1`] = ` { "countPenalty": { @@ -39,9 +45,3 @@ exports[`Ai21Jurassic2Api: all options 1`] = ` "topP": 0.9, } `; - -exports[`Ai21Jurassic2Api: prompt 1`] = ` -{ - "prompt": "mock-prompt", -} -`; diff --git a/packages/core/src/apis/ai21/jurassic2.spec.ts b/packages/core/src/apis/ai21/jurassic2.spec.ts index 2350fd3..b32ede0 100644 --- a/packages/core/src/apis/ai21/jurassic2.spec.ts +++ b/packages/core/src/apis/ai21/jurassic2.spec.ts @@ -17,9 +17,9 @@ describe("Ai21Jurassic2Api:", () => { * FewShotRequestOptions (prompt): */ - test("prompt", () => { + test("$prompt", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", }); 
expect(rendered).toMatchSnapshot(); @@ -31,7 +31,7 @@ describe("Ai21Jurassic2Api:", () => { test("all options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", numResults: 3, maxTokens: 200, minTokens: 100, diff --git a/packages/core/src/apis/ai21/jurassic2.ts b/packages/core/src/apis/ai21/jurassic2.ts index c814e0e..bf3b128 100644 --- a/packages/core/src/apis/ai21/jurassic2.ts +++ b/packages/core/src/apis/ai21/jurassic2.ts @@ -40,7 +40,7 @@ export interface Ai21Jurassic2Options extends ModelRequestOptions { */ export const Ai21Jurassic2Template = new FnTemplate( ({ - prompt, + $prompt, numResults, maxTokens, minTokens, @@ -54,7 +54,7 @@ export const Ai21Jurassic2Template = new FnTemplate( }: Ai21Jurassic2Options) => { return JSON.stringify( { - prompt, + prompt: $prompt, numResults, maxTokens, minTokens, diff --git a/packages/core/src/apis/amazon/__snapshots__/titanText.spec.ts.snap b/packages/core/src/apis/amazon/__snapshots__/titanText.spec.ts.snap index a901809..a9d134a 100644 --- a/packages/core/src/apis/amazon/__snapshots__/titanText.spec.ts.snap +++ b/packages/core/src/apis/amazon/__snapshots__/titanText.spec.ts.snap @@ -1,5 +1,11 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP +exports[`AmazonTitanTextApi: $prompt 1`] = ` +{ + "inputText": "mock-prompt", +} +`; + exports[`AmazonTitanTextApi: all options 1`] = ` { "inputText": "mock-prompt", @@ -14,9 +20,3 @@ exports[`AmazonTitanTextApi: all options 1`] = ` }, } `; - -exports[`AmazonTitanTextApi: prompt 1`] = ` -{ - "inputText": "mock-prompt", -} -`; diff --git a/packages/core/src/apis/amazon/titanText.spec.ts b/packages/core/src/apis/amazon/titanText.spec.ts index 161808a..5daacd0 100644 --- a/packages/core/src/apis/amazon/titanText.spec.ts +++ b/packages/core/src/apis/amazon/titanText.spec.ts @@ -16,9 +16,9 @@ describe("AmazonTitanTextApi:", () => { /** * FewShotRequestOptions (prompt): */ - test("prompt", () => { + test("$prompt", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", }); expect(rendered).toMatchSnapshot(); @@ -29,7 +29,7 @@ describe("AmazonTitanTextApi:", () => { */ test("all options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", temperature: 0.7, topP: 0.9, maxTokenCount: 100, diff --git a/packages/core/src/apis/amazon/titanText.ts b/packages/core/src/apis/amazon/titanText.ts index 571559c..69e1ad1 100644 --- a/packages/core/src/apis/amazon/titanText.ts +++ b/packages/core/src/apis/amazon/titanText.ts @@ -23,14 +23,14 @@ export interface AmazonTitanTextOptions extends ModelRequestOptions { */ export const AmazonTitanTextTemplate = new FnTemplate( ({ - prompt, + $prompt, temperature, topP, maxTokenCount, stopSequences, }: AmazonTitanTextOptions) => { const rewritten = { - inputText: prompt, + inputText: $prompt, }; const textGenerationConfig = { diff --git a/packages/core/src/apis/cohere/__snapshots__/chat.spec.ts.snap b/packages/core/src/apis/cohere/__snapshots__/chat.spec.ts.snap index 4f27fdd..afda06d 100644 --- a/packages/core/src/apis/cohere/__snapshots__/chat.spec.ts.snap +++ b/packages/core/src/apis/cohere/__snapshots__/chat.spec.ts.snap @@ -1,169 +1,13 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP -exports[`CohereChatApi: all options 1`] = ` -{ - "chat_history": [ - { - "message": "mock-user-msg-1", - "role": "USER", - }, - { - "message": "mock-assistant-msg-1", - "role": "CHATBOT", - }, - { - "message": "mock-user-msg", - "role": "USER", - }, - { - "role": "CHATBOT", - "tool_calls": [ - { - "name": 
"mock-tool", - "parameters": { - "key": "value", - }, - }, - ], - }, - { - "role": "TOOL", - "tool_results": [ - { - "call": { - "name": "mock-tool", - "parameters": { - "key": "value", - }, - }, - "outputs": [ - { - "key": "output-value", - }, - ], - }, - ], - }, - ], - "citation_quality": "high", - "conversation_id": "mock-conversation-id", - "documents": [ - { - "key": "doc-value", - }, - ], - "force_single_step": true, - "frequency_penalty": 0.5, - "k": 10, - "max_input_tokens": 1500, - "max_tokens": 1000, - "message": "mock-prompt", - "model": "mock-model-id", - "p": 0.9, - "preamble": "mock-system-textmock-preamble", - "presence_penalty": 0.3, - "prompt_truncation": "mock-prompt-truncation", - "search_queries_only": true, - "seed": 1234, - "stop_sequences": [ - "mock-stop", - ], - "stream": true, - "temperature": 0.7, - "tool_results": [ - { - "call": { - "name": "mock-tool", - "parameters": { - "key": "value", - }, - }, - "outputs": [ - { - "key": "output-value", - }, - ], - }, - ], - "tools": [ - { - "description": "mock-description", - "name": "mock-tool", - "parameter_definitions": { - "param1": { - "description": "mock-param-desc", - "required": true, - "type": "string", - }, - }, - }, - ], -} -`; - -exports[`CohereChatApi: combination of system and preamble 1`] = ` -{ - "message": "mock-prompt", - "model": "mock-model-id", - "preamble": "mock-system-textmock-preamble", -} -`; - -exports[`CohereChatApi: optional message in CohereChatHistoryToolCall 1`] = ` -{ - "chat_history": [ - { - "role": "CHATBOT", - "tool_calls": [ - { - "name": "mock-tool", - "parameters": { - "key": "value", - }, - }, - ], - }, - ], - "message": "mock-prompt", - "model": "mock-model-id", -} -`; - -exports[`CohereChatApi: optional message in CohereChatHistoryToolResults 1`] = ` -{ - "chat_history": [ - { - "role": "TOOL", - "tool_results": [ - { - "call": { - "name": "mock-tool", - "parameters": { - "key": "value", - }, - }, - "outputs": [ - { - "key": "output-value", - }, - ], - }, - ], - }, - ], - "message": "mock-prompt", - "model": "mock-model-id", -} -`; - -exports[`CohereChatApi: prompt 1`] = ` +exports[`CohereChatApi: $prompt 1`] = ` { "message": "mock-prompt", "model": "mock-model-id", } `; -exports[`CohereChatApi: prompt, chat_history 1`] = ` +exports[`CohereChatApi: $prompt, chat_history 1`] = ` { "chat_history": [ { @@ -180,7 +24,7 @@ exports[`CohereChatApi: prompt, chat_history 1`] = ` } `; -exports[`CohereChatApi: prompt, chat_history with tool_calls 1`] = ` +exports[`CohereChatApi: $prompt, chat_history with tool_calls 1`] = ` { "chat_history": [ { @@ -201,7 +45,7 @@ exports[`CohereChatApi: prompt, chat_history with tool_calls 1`] = ` } `; -exports[`CohereChatApi: prompt, examplePairs 1`] = ` +exports[`CohereChatApi: $prompt, examplePairs 1`] = ` { "chat_history": [ { @@ -226,7 +70,7 @@ exports[`CohereChatApi: prompt, examplePairs 1`] = ` } `; -exports[`CohereChatApi: prompt, examplePairs, chat_history 1`] = ` +exports[`CohereChatApi: $prompt, examplePairs, chat_history 1`] = ` { "chat_history": [ { @@ -251,7 +95,7 @@ exports[`CohereChatApi: prompt, examplePairs, chat_history 1`] = ` } `; -exports[`CohereChatApi: prompt, examplePairs, chat_history 2`] = ` +exports[`CohereChatApi: $prompt, examplePairs, chat_history 2`] = ` { "chat_history": [ { @@ -272,7 +116,7 @@ exports[`CohereChatApi: prompt, examplePairs, chat_history 2`] = ` } `; -exports[`CohereChatApi: prompt, examplePairs, system 1`] = ` +exports[`CohereChatApi: $prompt, examplePairs, system 1`] = ` { "chat_history": [ { @@ -290,7 
+134,7 @@ exports[`CohereChatApi: prompt, examplePairs, system 1`] = ` } `; -exports[`CohereChatApi: prompt, examplePairs, system, chat_history 1`] = ` +exports[`CohereChatApi: $prompt, examplePairs, system, chat_history 1`] = ` { "chat_history": [ { @@ -312,7 +156,7 @@ exports[`CohereChatApi: prompt, examplePairs, system, chat_history 1`] = ` } `; -exports[`CohereChatApi: prompt, system 1`] = ` +exports[`CohereChatApi: $prompt, system 1`] = ` { "message": "mock-prompt", "model": "mock-model-id", @@ -320,7 +164,7 @@ exports[`CohereChatApi: prompt, system 1`] = ` } `; -exports[`CohereChatApi: prompt, system, chat_history 1`] = ` +exports[`CohereChatApi: $prompt, system, chat_history 1`] = ` { "chat_history": [ { @@ -338,7 +182,7 @@ exports[`CohereChatApi: prompt, system, chat_history 1`] = ` } `; -exports[`CohereChatApi: prompt, tool_results 1`] = ` +exports[`CohereChatApi: $prompt, tool_results 1`] = ` { "message": "mock-prompt", "model": "mock-model-id", @@ -360,7 +204,7 @@ exports[`CohereChatApi: prompt, tool_results 1`] = ` } `; -exports[`CohereChatApi: prompt, tools 1`] = ` +exports[`CohereChatApi: $prompt, tools 1`] = ` { "message": "mock-prompt", "model": "mock-model-id", @@ -380,6 +224,162 @@ exports[`CohereChatApi: prompt, tools 1`] = ` } `; +exports[`CohereChatApi: all options 1`] = ` +{ + "chat_history": [ + { + "message": "mock-user-msg-1", + "role": "USER", + }, + { + "message": "mock-assistant-msg-1", + "role": "CHATBOT", + }, + { + "message": "mock-user-msg", + "role": "USER", + }, + { + "role": "CHATBOT", + "tool_calls": [ + { + "name": "mock-tool", + "parameters": { + "key": "value", + }, + }, + ], + }, + { + "role": "TOOL", + "tool_results": [ + { + "call": { + "name": "mock-tool", + "parameters": { + "key": "value", + }, + }, + "outputs": [ + { + "key": "output-value", + }, + ], + }, + ], + }, + ], + "citation_quality": "high", + "conversation_id": "mock-conversation-id", + "documents": [ + { + "key": "doc-value", + }, + ], + "force_single_step": true, + "frequency_penalty": 0.5, + "k": 10, + "max_input_tokens": 1500, + "max_tokens": 1000, + "message": "mock-prompt", + "model": "mock-model-id", + "p": 0.9, + "preamble": "mock-system-textmock-preamble", + "presence_penalty": 0.3, + "prompt_truncation": "mock-prompt-truncation", + "search_queries_only": true, + "seed": 1234, + "stop_sequences": [ + "mock-stop", + ], + "stream": true, + "temperature": 0.7, + "tool_results": [ + { + "call": { + "name": "mock-tool", + "parameters": { + "key": "value", + }, + }, + "outputs": [ + { + "key": "output-value", + }, + ], + }, + ], + "tools": [ + { + "description": "mock-description", + "name": "mock-tool", + "parameter_definitions": { + "param1": { + "description": "mock-param-desc", + "required": true, + "type": "string", + }, + }, + }, + ], +} +`; + +exports[`CohereChatApi: combination of system and preamble 1`] = ` +{ + "message": "mock-prompt", + "model": "mock-model-id", + "preamble": "mock-system-textmock-preamble", +} +`; + +exports[`CohereChatApi: optional message in CohereChatHistoryToolCall 1`] = ` +{ + "chat_history": [ + { + "role": "CHATBOT", + "tool_calls": [ + { + "name": "mock-tool", + "parameters": { + "key": "value", + }, + }, + ], + }, + ], + "message": "mock-prompt", + "model": "mock-model-id", +} +`; + +exports[`CohereChatApi: optional message in CohereChatHistoryToolResults 1`] = ` +{ + "chat_history": [ + { + "role": "TOOL", + "tool_results": [ + { + "call": { + "name": "mock-tool", + "parameters": { + "key": "value", + }, + }, + "outputs": [ + { + "key": 
"output-value", + }, + ], + }, + ], + }, + ], + "message": "mock-prompt", + "model": "mock-model-id", +} +`; + exports[`CohereChatApi: undefined examplePairs and chat_history 1`] = ` { "message": "mock-prompt", diff --git a/packages/core/src/apis/cohere/__snapshots__/generate.spec.ts.snap b/packages/core/src/apis/cohere/__snapshots__/generate.spec.ts.snap index ba388e1..c1dadce 100644 --- a/packages/core/src/apis/cohere/__snapshots__/generate.spec.ts.snap +++ b/packages/core/src/apis/cohere/__snapshots__/generate.spec.ts.snap @@ -1,37 +1,12 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP -exports[`CohereGenerateApi: all options 1`] = ` -{ - "end_sequences": [ - "mock-end-seq", - ], - "frequency_penalty": 0.5, - "k": 5, - "logit_bias": { - "50256": -100, - }, - "max_tokens": 1000, - "num_generations": 3, - "p": 0.9, - "presence_penalty": 0.3, - "prompt": "mock-prompt", - "return_likelihoods": "ALL", - "seed": 1234, - "stop_sequences": [ - "mock-stop-seq", - ], - "temperature": 0.7, - "truncate": "END", -} -`; - -exports[`CohereGenerateApi: prompt 1`] = ` +exports[`CohereGenerateApi: $prompt 1`] = ` { "prompt": "mock-prompt", } `; -exports[`CohereGenerateApi: prompt, logit_bias with multiple values 1`] = ` +exports[`CohereGenerateApi: $prompt, logit_bias with multiple values 1`] = ` { "logit_bias": { "50256": -100, @@ -41,14 +16,14 @@ exports[`CohereGenerateApi: prompt, logit_bias with multiple values 1`] = ` } `; -exports[`CohereGenerateApi: prompt, return_likelihoods as NONE 1`] = ` +exports[`CohereGenerateApi: $prompt, return_likelihoods as NONE 1`] = ` { "prompt": "mock-prompt", "return_likelihoods": "NONE", } `; -exports[`CohereGenerateApi: prompt, stop_sequences and end_sequences 1`] = ` +exports[`CohereGenerateApi: $prompt, stop_sequences and end_sequences 1`] = ` { "end_sequences": [ "mock-end-seq", @@ -60,9 +35,34 @@ exports[`CohereGenerateApi: prompt, stop_sequences and end_sequences 1`] = ` } `; -exports[`CohereGenerateApi: prompt, truncate as NONE 1`] = ` +exports[`CohereGenerateApi: $prompt, truncate as NONE 1`] = ` { "prompt": "mock-prompt", "truncate": "NONE", } `; + +exports[`CohereGenerateApi: all options 1`] = ` +{ + "end_sequences": [ + "mock-end-seq", + ], + "frequency_penalty": 0.5, + "k": 5, + "logit_bias": { + "50256": -100, + }, + "max_tokens": 1000, + "num_generations": 3, + "p": 0.9, + "presence_penalty": 0.3, + "prompt": "mock-prompt", + "return_likelihoods": "ALL", + "seed": 1234, + "stop_sequences": [ + "mock-stop-seq", + ], + "temperature": 0.7, + "truncate": "END", +} +`; diff --git a/packages/core/src/apis/cohere/chat.spec.ts b/packages/core/src/apis/cohere/chat.spec.ts index 60e8917..3436ac2 100644 --- a/packages/core/src/apis/cohere/chat.spec.ts +++ b/packages/core/src/apis/cohere/chat.spec.ts @@ -16,14 +16,14 @@ describe("CohereChatApi:", () => { /** * FewShotRequestOptions (prompt, examplePairs, system): */ - test("prompt", () => { - const rendered = render({ prompt: "mock-prompt" }); + test("$prompt", () => { + const rendered = render({ $prompt: "mock-prompt" }); expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs", () => { + test("$prompt, examplePairs", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, { user: "mock-user-msg-2", assistant: "mock-assistant-msg-2" }, @@ -32,17 +32,17 @@ describe("CohereChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, system", () => { + test("$prompt, system", () => { 
const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, system", () => { + test("$prompt, examplePairs, system", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -51,9 +51,9 @@ describe("CohereChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, chat_history", () => { + test("$prompt, examplePairs, chat_history", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -68,9 +68,9 @@ describe("CohereChatApi:", () => { /** * Native few shot options (chat_history): */ - test("prompt, chat_history", () => { + test("$prompt, chat_history", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", chat_history: [ { role: "USER", message: "mock-user-msg" }, { role: "CHATBOT", message: "mock-chatbot-msg" }, @@ -79,9 +79,9 @@ describe("CohereChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, chat_history", () => { + test("$prompt, examplePairs, chat_history", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -90,9 +90,9 @@ describe("CohereChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, system, chat_history", () => { + test("$prompt, system, chat_history", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", chat_history: [ { role: "USER", message: "mock-user-msg" }, @@ -102,9 +102,9 @@ describe("CohereChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, system, chat_history", () => { + test("$prompt, examplePairs, system, chat_history", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -117,9 +117,9 @@ describe("CohereChatApi:", () => { /** * Tool-related: */ - test("prompt, tools", () => { + test("$prompt, tools", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", tools: [ { name: "mock-tool", @@ -137,9 +137,9 @@ describe("CohereChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, tool_results", () => { + test("$prompt, tool_results", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", tool_results: [ { call: { @@ -153,9 +153,9 @@ describe("CohereChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, chat_history with tool_calls", () => { + test("$prompt, chat_history with tool_calls", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", chat_history: [ { role: "USER", @@ -172,7 +172,7 @@ describe("CohereChatApi:", () => { */ test("all options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -241,7 +241,7 @@ describe("CohereChatApi:", () => { */ test("combination of system and preamble", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", preamble: "mock-preamble", 
}); @@ -250,7 +250,7 @@ describe("CohereChatApi:", () => { test("optional message in CohereChatHistoryToolCall", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", chat_history: [ { role: "CHATBOT", @@ -263,7 +263,7 @@ describe("CohereChatApi:", () => { test("optional message in CohereChatHistoryToolResults", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", chat_history: [ { role: "TOOL", @@ -280,7 +280,7 @@ describe("CohereChatApi:", () => { }); test("undefined examplePairs and chat_history", () => { - const rendered = render({ prompt: "mock-prompt" }); + const rendered = render({ $prompt: "mock-prompt" }); expect(rendered).toMatchSnapshot(); }); }); diff --git a/packages/core/src/apis/cohere/chat.ts b/packages/core/src/apis/cohere/chat.ts index 837bed3..830e56c 100644 --- a/packages/core/src/apis/cohere/chat.ts +++ b/packages/core/src/apis/cohere/chat.ts @@ -104,7 +104,7 @@ export interface CohereChatOptions export const CohereChatTemplate = new FnTemplate( ({ modelId, - prompt, + $prompt, chat_history, examplePairs, system, @@ -130,7 +130,7 @@ export const CohereChatTemplate = new FnTemplate( }: CohereChatOptions) => { const rewritten = { model: modelId, - message: prompt, + message: $prompt, ...(chat_history || examplePairs ? { chat_history: [ diff --git a/packages/core/src/apis/cohere/generate.spec.ts b/packages/core/src/apis/cohere/generate.spec.ts index 5746fa6..cddf263 100644 --- a/packages/core/src/apis/cohere/generate.spec.ts +++ b/packages/core/src/apis/cohere/generate.spec.ts @@ -17,9 +17,9 @@ describe("CohereGenerateApi:", () => { * FewShotRequestOptions (prompt): */ - test("prompt", () => { + test("$prompt", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", }); expect(rendered).toMatchSnapshot(); @@ -31,7 +31,7 @@ describe("CohereGenerateApi:", () => { test("all options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", num_generations: 3, max_tokens: 1000, truncate: "END", @@ -54,9 +54,9 @@ describe("CohereGenerateApi:", () => { * Special cases: */ - test("prompt, stop_sequences and end_sequences", () => { + test("$prompt, stop_sequences and end_sequences", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", stop_sequences: ["mock-stop-seq"], end_sequences: ["mock-end-seq"], }); @@ -64,27 +64,27 @@ describe("CohereGenerateApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, truncate as NONE", () => { + test("$prompt, truncate as NONE", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", truncate: "NONE", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, return_likelihoods as NONE", () => { + test("$prompt, return_likelihoods as NONE", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", return_likelihoods: "NONE", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, logit_bias with multiple values", () => { + test("$prompt, logit_bias with multiple values", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", logit_bias: { 50256: -100, 50257: 50 }, }); diff --git a/packages/core/src/apis/cohere/generate.ts b/packages/core/src/apis/cohere/generate.ts index fc32642..273e6ac 100644 --- a/packages/core/src/apis/cohere/generate.ts +++ b/packages/core/src/apis/cohere/generate.ts @@ -35,7 +35,7 @@ export interface CohereGenerateOptions extends ModelRequestOptions { */ export const 
CohereGenerateTemplate = new FnTemplate( ({ - prompt, + $prompt, num_generations, stream, max_tokens, @@ -54,7 +54,7 @@ export const CohereGenerateTemplate = new FnTemplate( }: CohereGenerateOptions) => { return JSON.stringify( { - prompt, + prompt: $prompt, num_generations, stream, max_tokens, diff --git a/packages/core/src/apis/google/GoogleGeminiApi.spec.ts b/packages/core/src/apis/google/GoogleGeminiApi.spec.ts index f09c5cb..7859435 100644 --- a/packages/core/src/apis/google/GoogleGeminiApi.spec.ts +++ b/packages/core/src/apis/google/GoogleGeminiApi.spec.ts @@ -15,19 +15,19 @@ function render(context: Omit) { describe("GoogleGeminiApi.requestTemplate", () => { /** - * FewShotRequestOptions (prompt, examplePairs, system): + * FewShotRequestOptions ($prompt, examplePairs, system): */ - test("prompt", () => { + test("$prompt", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs", () => { + test("$prompt, examplePairs", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, { user: "mock-user-msg-2", assistant: "mock-assistant-msg-2" }, @@ -36,17 +36,17 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, system", () => { + test("$prompt, system", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, system", () => { + test("$prompt, examplePairs, system", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -56,12 +56,12 @@ describe("GoogleGeminiApi.requestTemplate", () => { }); /** - * "Native" few shot options (prompt, contents, system_instruction): + * "Native" few shot options ($prompt, contents, system_instruction): */ - test("prompt, contents with user / model (appends prompt)", () => { + test("$prompt, contents with user / model (appends $prompt)", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", contents: [ { role: "user", @@ -76,9 +76,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, contents with model / user (prepends prompt)", () => { + test("$prompt, contents with model / user (prepends $prompt)", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", contents: [ { role: "model", @@ -93,9 +93,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, contents starting and ending with model (appends AND prepends prompt)", () => { + test("$prompt, contents starting and ending with model (appends AND prepends $prompt)", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", contents: [ { role: "model", @@ -114,9 +114,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, contents starting and ending with user (does not add prompt)", () => { + test("$prompt, contents starting and ending with user (does not add $prompt)", () => { const rendered = render({ - prompt: "mock-prompt-should-not-appear", + $prompt: "mock-prompt-should-not-appear", contents: [ { role: "user", @@ -135,9 +135,9 @@ 
describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, system_instruction", () => { + test("$prompt, system_instruction", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system_instruction: { parts: [{ text: "mock-system-text" }], }, @@ -145,9 +145,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, contents, system_instruction", () => { + test("$prompt, contents, system_instruction", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", contents: [ { role: "model", @@ -169,9 +169,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { * Combinations of FewShotRequestOptions and "native" options: */ - test("prompt, examplePairs, contents with user / model (appends prompt)", () => { + test("$prompt, examplePairs, contents with user / model (appends $prompt)", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-example-pair", @@ -192,9 +192,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, contents with model / user (inserts prompt, conversation is valid)", () => { + test("$prompt, examplePairs, contents with model / user (inserts $prompt, conversation is valid)", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-example-pair", @@ -215,9 +215,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, system_instruction", () => { + test("$prompt, examplePairs, system_instruction", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -228,9 +228,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, system, contents", () => { + test("$prompt, system, contents", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", contents: [ { @@ -242,9 +242,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, system, system_instruction", () => { + test("$prompt, system, system_instruction", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", system_instruction: { parts: [ @@ -256,9 +256,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, contents, system_instruction", () => { + test("$prompt, examplePairs, contents, system_instruction", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -279,9 +279,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { * Tool-related: */ - test("prompt, contents ending with function_call, $tools with matching invocation (appends function_response content items)", () => { + test("$prompt, contents ending with function_call, $tools with matching invocation (appends function_response content items)", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", contents: [ { role: "user", @@ -324,9 +324,9 @@ describe("GoogleGeminiApi.requestTemplate", () 
=> { expect(rendered).toMatchSnapshot(); }); - test("prompt, contents ending with model function_call, $tools without matching invocation (appends prompt; TODO logs warning)", () => { + test("$prompt, contents ending with model function_call, $tools without matching invocation (appends $prompt; TODO logs warning)", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", contents: [ { role: "user", @@ -364,9 +364,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { // TODO expect warning }); - test("prompt, contents ending with model function_call, no $tools (prepends and appends prompt; TODO logs warning)", () => { + test("$prompt, contents ending with model function_call, no $tools (prepends and appends $prompt; TODO logs warning)", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", contents: [ { role: "model", @@ -385,9 +385,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { // TODO expect warning }); - test("prompt, contents ending with user function_response (prepends prompt)", () => { + test("$prompt, contents ending with user function_response (prepends $prompt)", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", contents: [ { role: "model", @@ -418,9 +418,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { /* * Tool declarations: */ - test("prompt, tools", () => { + test("$prompt, tools", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", tools: [ { function_declarations: [ @@ -441,9 +441,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, $tools", () => { + test("$prompt, $tools", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", $tools: [ { name: "mock-function-1", @@ -482,9 +482,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, tools, $tools", () => { + test("$prompt, tools, $tools", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", tools: [ { function_declarations: [ @@ -539,9 +539,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, tools_config", () => { + test("$prompt, tools_config", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", tools_config: { mode: "AUTO", allowed_function_names: ["mock-function"], @@ -553,9 +553,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { /** * "Native" options: */ - test("prompt, safety_settings", () => { + test("$prompt, safety_settings", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", safety_settings: { category: "mock-category", threshold: "mock-threshold", @@ -564,9 +564,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, generation_config", () => { + test("$prompt, generation_config", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", generation_config: { temperature: 0.7, top_p: 0.9, @@ -575,9 +575,9 @@ describe("GoogleGeminiApi.requestTemplate", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, tool, tools_config, system_instruction, safety_settings, generation_config", () => { + test("$prompt, tool, tools_config, system_instruction, safety_settings, generation_config", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", 
contents: [ { role: "model", diff --git a/packages/core/src/apis/google/GoogleGeminiApi.ts b/packages/core/src/apis/google/GoogleGeminiApi.ts index ac23d21..01b20ae 100644 --- a/packages/core/src/apis/google/GoogleGeminiApi.ts +++ b/packages/core/src/apis/google/GoogleGeminiApi.ts @@ -24,7 +24,7 @@ import { FUNCTION_CALL_WITHOUT_TOOLS } from "./errors"; */ export const GoogleGeminiTemplate = new FnTemplate( ({ - prompt, + $prompt, examplePairs, system, $tools, @@ -47,13 +47,13 @@ export const GoogleGeminiTemplate = new FnTemplate( _contents = [ { role: "user", - parts: [{ text: prompt }], + parts: [{ text: $prompt }], }, ..._contents, ]; } - // the conversation must end with a user message (either prompt or tool responses): + // the conversation must end with a user message (either $prompt or tool responses): if (lastItem && lastItem.role === "model") { const functionCalls = lastItem.parts.filter( (part): part is PartWithFunctionCall => "functionCall" in part, @@ -73,12 +73,12 @@ export const GoogleGeminiTemplate = new FnTemplate( }, ]; } else { - // if no tool responses, we logged a warning in `applyFunctionCalls`, and now append prompt as fallback: + // if no tool responses, we logged a warning in `applyFunctionCalls`, and now append $prompt as fallback: _contents = [ ..._contents, { role: "user", - parts: [{ text: prompt }], + parts: [{ text: $prompt }], }, ]; } @@ -91,7 +91,7 @@ export const GoogleGeminiTemplate = new FnTemplate( ..._contents, { role: "user", - parts: [{ text: prompt }], + parts: [{ text: $prompt }], }, ]; } diff --git a/packages/core/src/apis/google/__snapshots__/GoogleGeminiApi.spec.ts.snap b/packages/core/src/apis/google/__snapshots__/GoogleGeminiApi.spec.ts.snap index ce41b52..8408aba 100644 --- a/packages/core/src/apis/google/__snapshots__/GoogleGeminiApi.spec.ts.snap +++ b/packages/core/src/apis/google/__snapshots__/GoogleGeminiApi.spec.ts.snap @@ -1,6 +1,6 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP -exports[`GoogleGeminiApi.requestTemplate prompt 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt 1`] = ` { "contents": [ { @@ -15,7 +15,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt 1`] = ` } `; -exports[`GoogleGeminiApi.requestTemplate prompt, $tools 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, $tools 1`] = ` { "contents": [ { @@ -68,7 +68,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, $tools 1`] = ` } `; -exports[`GoogleGeminiApi.requestTemplate prompt, contents ending with function_call, $tools with matching invocation (appends function_response content items) 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, contents ending with function_call, $tools with matching invocation (appends function_response content items) 1`] = ` { "contents": [ { @@ -133,7 +133,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, contents ending with function_c } `; -exports[`GoogleGeminiApi.requestTemplate prompt, contents ending with model function_call, $tools without matching invocation (appends prompt; TODO logs warning) 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, contents ending with model function_call, $tools without matching invocation (appends $prompt; TODO logs warning) 1`] = ` { "contents": [ { @@ -189,7 +189,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, contents ending with model func } `; -exports[`GoogleGeminiApi.requestTemplate prompt, contents ending with model function_call, no $tools (prepends and appends prompt; TODO logs warning) 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, 
contents ending with model function_call, no $tools (prepends and appends $prompt; TODO logs warning) 1`] = ` { "contents": [ { @@ -225,7 +225,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, contents ending with model func } `; -exports[`GoogleGeminiApi.requestTemplate prompt, contents ending with user function_response (prepends prompt) 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, contents ending with user function_response (prepends $prompt) 1`] = ` { "contents": [ { @@ -266,7 +266,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, contents ending with user funct } `; -exports[`GoogleGeminiApi.requestTemplate prompt, contents starting and ending with model (appends AND prepends prompt) 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, contents starting and ending with model (appends AND prepends $prompt) 1`] = ` { "contents": [ { @@ -313,7 +313,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, contents starting and ending wi } `; -exports[`GoogleGeminiApi.requestTemplate prompt, contents starting and ending with user (does not add prompt) 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, contents starting and ending with user (does not add $prompt) 1`] = ` { "contents": [ { @@ -344,7 +344,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, contents starting and ending wi } `; -exports[`GoogleGeminiApi.requestTemplate prompt, contents with model / user (prepends prompt) 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, contents with model / user (prepends $prompt) 1`] = ` { "contents": [ { @@ -375,7 +375,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, contents with model / user (pre } `; -exports[`GoogleGeminiApi.requestTemplate prompt, contents with user / model (appends prompt) 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, contents with user / model (appends $prompt) 1`] = ` { "contents": [ { @@ -406,7 +406,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, contents with user / model (app } `; -exports[`GoogleGeminiApi.requestTemplate prompt, contents, system_instruction 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, contents, system_instruction 1`] = ` { "contents": [ { @@ -444,7 +444,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, contents, system_instruction 1` } `; -exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, examplePairs 1`] = ` { "contents": [ { @@ -491,7 +491,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs 1`] = ` } `; -exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, contents with model / user (inserts prompt, conversation is valid) 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, examplePairs, contents with model / user (inserts $prompt, conversation is valid) 1`] = ` { "contents": [ { @@ -538,7 +538,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, contents with mod } `; -exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, contents with user / model (appends prompt) 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, examplePairs, contents with user / model (appends $prompt) 1`] = ` { "contents": [ { @@ -585,7 +585,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, contents with use } `; -exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, contents, system_instruction 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, examplePairs, contents, system_instruction 1`] = ` { "contents": [ { @@ 
-639,7 +639,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, contents, system_ } `; -exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, system 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, examplePairs, system 1`] = ` { "contents": [ { @@ -677,7 +677,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, system 1`] = ` } `; -exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, system_instruction 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, examplePairs, system_instruction 1`] = ` { "contents": [ { @@ -715,7 +715,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, system_instructio } `; -exports[`GoogleGeminiApi.requestTemplate prompt, generation_config 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, generation_config 1`] = ` { "contents": [ { @@ -734,7 +734,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, generation_config 1`] = ` } `; -exports[`GoogleGeminiApi.requestTemplate prompt, safety_settings 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, safety_settings 1`] = ` { "contents": [ { @@ -753,7 +753,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, safety_settings 1`] = ` } `; -exports[`GoogleGeminiApi.requestTemplate prompt, system 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, system 1`] = ` { "contents": [ { @@ -775,7 +775,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, system 1`] = ` } `; -exports[`GoogleGeminiApi.requestTemplate prompt, system, contents 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, system, contents 1`] = ` { "contents": [ { @@ -813,7 +813,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, system, contents 1`] = ` } `; -exports[`GoogleGeminiApi.requestTemplate prompt, system, system_instruction 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, system, system_instruction 1`] = ` { "contents": [ { @@ -841,7 +841,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, system, system_instruction 1`] } `; -exports[`GoogleGeminiApi.requestTemplate prompt, system_instruction 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, system_instruction 1`] = ` { "contents": [ { @@ -863,7 +863,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, system_instruction 1`] = ` } `; -exports[`GoogleGeminiApi.requestTemplate prompt, tool, tools_config, system_instruction, safety_settings, generation_config 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, tool, tools_config, system_instruction, safety_settings, generation_config 1`] = ` { "contents": [ { @@ -921,7 +921,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, tool, tools_config, system_inst } `; -exports[`GoogleGeminiApi.requestTemplate prompt, tools 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, tools 1`] = ` { "contents": [ { @@ -954,7 +954,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, tools 1`] = ` } `; -exports[`GoogleGeminiApi.requestTemplate prompt, tools, $tools 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, tools, $tools 1`] = ` { "contents": [ { @@ -1025,7 +1025,7 @@ exports[`GoogleGeminiApi.requestTemplate prompt, tools, $tools 1`] = ` } `; -exports[`GoogleGeminiApi.requestTemplate prompt, tools_config 1`] = ` +exports[`GoogleGeminiApi.requestTemplate $prompt, tools_config 1`] = ` { "contents": [ { diff --git a/packages/core/src/apis/google/__snapshots__/gemini.spec.ts.snap b/packages/core/src/apis/google/__snapshots__/gemini.spec.ts.snap deleted file mode 100644 index ce41b52..0000000 --- 
a/packages/core/src/apis/google/__snapshots__/gemini.spec.ts.snap +++ /dev/null @@ -1,1047 +0,0 @@ -// Jest Snapshot v1, https://goo.gl/fbAQLP - -exports[`GoogleGeminiApi.requestTemplate prompt 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, $tools 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "tools": [ - { - "function_declarations": [ - { - "description": "mock-description-1", - "name": "mock-function-1", - "parameters": { - "properties": { - "mock-function-1-param-1": { - "description": "mock-function-1-param-1-description-1", - "type": "STRING", - }, - "mock-function-1-param-2": { - "description": "mock-function-1-param-2-description-2", - "type": "NUMBER", - }, - }, - "required": [], - "type": "OBJECT", - }, - }, - { - "description": "mock-description-2", - "name": "mock-function-2", - "parameters": { - "properties": { - "mock-function-2-param-1": { - "description": "mock-function-2-param-1-description-1", - "type": "BOOLEAN", - }, - }, - "required": [], - "type": "OBJECT", - }, - }, - ], - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, contents ending with function_call, $tools with matching invocation (appends function_response content items) 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-user-text", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "functionCall": { - "args": { - "key": "value", - }, - "name": "mock-function", - }, - }, - ], - "role": "model", - }, - { - "parts": [ - { - "function_response": { - "name": "mock-function", - "response": { - "returned": { - "responseKey": "responseValue", - }, - }, - }, - }, - ], - "role": "user", - }, - ], - "tools": [ - { - "function_declarations": [ - { - "description": "mock-description", - "name": "mock-function", - "parameters": { - "properties": { - "key": { - "description": "mock-key-description", - "type": "STRING", - }, - }, - "required": [ - "key", - ], - "type": "OBJECT", - }, - }, - ], - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, contents ending with model function_call, $tools without matching invocation (appends prompt; TODO logs warning) 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-user-text", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "functionCall": { - "args": { - "key": "value", - }, - "name": "mock-function", - }, - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "tools": [ - { - "function_declarations": [ - { - "description": "another-description", - "name": "another-function", - "parameters": { - "properties": { - "another-key": { - "description": "another-key-description", - "type": "STRING", - }, - }, - "required": [], - "type": "OBJECT", - }, - }, - ], - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, contents ending with model function_call, no $tools (prepends and appends prompt; TODO logs warning) 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "functionCall": { - "args": { - "key": "value", - }, - "name": "mock-function", - }, - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, contents ending with user function_response (prepends 
prompt) 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "functionCall": { - "args": { - "key": "value", - }, - "name": "mock-function", - }, - }, - ], - "role": "model", - }, - { - "parts": [ - { - "function_response": { - "name": "mock-function", - "response": { - "responseKey": "responseValue", - }, - }, - }, - ], - "role": "user", - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, contents starting and ending with model (appends AND prepends prompt) 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-model-text", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-user-text", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-model-text-2", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, contents starting and ending with user (does not add prompt) 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-user-text", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-model-text", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-user-text-2", - }, - ], - "role": "user", - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, contents with model / user (prepends prompt) 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-model-text", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-user-text-2", - }, - ], - "role": "user", - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, contents with user / model (appends prompt) 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-user-text", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-model-text", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, contents, system_instruction 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-model-text", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-user-text", - }, - ], - "role": "user", - }, - ], - "system_instruction": { - "parts": [ - { - "text": "mock-system-text", - }, - ], - }, -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-user-msg-1", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-assistant-msg-1", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-user-msg-2", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-assistant-msg-2", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, contents with model / user (inserts prompt, conversation is valid) 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-user-example-pair", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-assistant-example-pair", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - 
"role": "user", - }, - { - "parts": [ - { - "text": "mock-model-text", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-user-text", - }, - ], - "role": "user", - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, contents with user / model (appends prompt) 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-user-example-pair", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-assistant-example-pair", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-user-text", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-model-text", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, contents, system_instruction 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-user-msg-1", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-assistant-msg-1", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-model-text", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "system_instruction": { - "parts": [ - { - "text": "mock-system-text", - }, - ], - }, -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, system 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-user-msg-1", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-assistant-msg-1", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "system_instruction": { - "parts": [ - { - "text": "mock-system-text", - }, - ], - }, -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, examplePairs, system_instruction 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-user-msg-1", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-assistant-msg-1", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "system_instruction": { - "parts": [ - { - "text": "mock-system-text", - }, - ], - }, -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, generation_config 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "generation_config": { - "temperature": 0.7, - "top_p": 0.9, - }, -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, safety_settings 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "safety_settings": { - "category": "mock-category", - "threshold": "mock-threshold", - }, -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, system 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "system_instruction": { - "parts": [ - { - "text": "mock-system-text", - }, - ], - }, -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, system, contents 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-model-text", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "system_instruction": { - "parts": [ - { - "text": "mock-system-text", - }, - ], 
- }, -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, system, system_instruction 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "system_instruction": { - "parts": [ - { - "text": "mock-system-text", - }, - { - "text": "mock-additional-instruction", - }, - { - "text": "mock-additional-instruction-2", - }, - ], - }, -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, system_instruction 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "system_instruction": { - "parts": [ - { - "text": "mock-system-text", - }, - ], - }, -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, tool, tools_config, system_instruction, safety_settings, generation_config 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - { - "parts": [ - { - "text": "mock-model-text", - }, - ], - "role": "model", - }, - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "generation_config": { - "max_output_tokens": 100, - "top_p": 0.7, - }, - "safety_settings": { - "method": "mock-method", - "threshold": "mock-threshold", - }, - "system_instruction": { - "parts": [ - { - "text": "mock-system-text", - }, - ], - }, - "tools": [ - { - "function_declarations": [ - { - "name": "mock-function", - }, - ], - }, - ], - "tools_config": { - "mode": "ANY", - }, -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, tools 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "tools": [ - { - "function_declarations": [ - { - "description": "mock-description", - "name": "mock-function", - "parameters": { - "properties": { - "key": { - "type": "STRING", - }, - }, - "type": "OBJECT", - }, - }, - ], - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, tools, $tools 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "tools": [ - { - "function_declarations": [ - { - "description": "mock-description", - "name": "mock-function", - "parameters": { - "properties": { - "key": { - "type": "STRING", - }, - }, - "type": "OBJECT", - }, - }, - ], - }, - { - "function_declarations": [ - { - "description": "mock-description-1", - "name": "mock-function-1", - "parameters": { - "properties": { - "mock-function-1-param-1": { - "description": "mock-function-1-param-1-description-1", - "type": "STRING", - }, - "mock-function-1-param-2": { - "description": "mock-function-1-param-2-description-2", - "type": "NUMBER", - }, - }, - "required": [ - "mock-function-1-param-2", - ], - "type": "OBJECT", - }, - }, - { - "description": "mock-description-2", - "name": "mock-function-2", - "parameters": { - "properties": { - "mock-function-2-param-1": { - "description": "mock-function-2-param-1-description-1", - "type": "BOOLEAN", - }, - }, - "required": [], - "type": "OBJECT", - }, - }, - ], - }, - ], -} -`; - -exports[`GoogleGeminiApi.requestTemplate prompt, tools_config 1`] = ` -{ - "contents": [ - { - "parts": [ - { - "text": "mock-prompt", - }, - ], - "role": "user", - }, - ], - "tools_config": { - "allowed_function_names": [ - "mock-function", - ], - "mode": "AUTO", - }, -} -`; diff --git a/packages/core/src/apis/huggingface/__snapshots__/hfConversationTaskApi.spec.ts.snap b/packages/core/src/apis/huggingface/__snapshots__/hfConversationTaskApi.spec.ts.snap index cbe1059..e064338 100644 --- 
a/packages/core/src/apis/huggingface/__snapshots__/hfConversationTaskApi.spec.ts.snap +++ b/packages/core/src/apis/huggingface/__snapshots__/hfConversationTaskApi.spec.ts.snap @@ -1,39 +1,12 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP -exports[`HfConversationalTaskOptions: all options 1`] = ` -{ - "generated_responses": [ - "response1", - "response2", - ], - "inputs": "mock-prompt", - "options": { - "use_cache": true, - "wait_for_model": false, - }, - "parameters": { - "max_length": 100, - "max_time": 60, - "min_length": 10, - "repetition_penalty": 1.2, - "temperature": 0.7, - "top_k": 50, - "top_p": 0.9, - }, - "past_user_inputs": [ - "input1", - "input2", - ], -} -`; - -exports[`HfConversationalTaskOptions: prompt 1`] = ` +exports[`HfConversationalTaskOptions: $prompt 1`] = ` { "inputs": "mock-prompt", } `; -exports[`HfConversationalTaskOptions: prompt, generated_responses 1`] = ` +exports[`HfConversationalTaskOptions: $prompt, generated_responses 1`] = ` { "generated_responses": [ "response1", @@ -43,7 +16,7 @@ exports[`HfConversationalTaskOptions: prompt, generated_responses 1`] = ` } `; -exports[`HfConversationalTaskOptions: prompt, options 1`] = ` +exports[`HfConversationalTaskOptions: $prompt, options 1`] = ` { "inputs": "mock-prompt", "options": { @@ -53,7 +26,7 @@ exports[`HfConversationalTaskOptions: prompt, options 1`] = ` } `; -exports[`HfConversationalTaskOptions: prompt, parameters 1`] = ` +exports[`HfConversationalTaskOptions: $prompt, parameters 1`] = ` { "inputs": "mock-prompt", "parameters": { @@ -68,7 +41,7 @@ exports[`HfConversationalTaskOptions: prompt, parameters 1`] = ` } `; -exports[`HfConversationalTaskOptions: prompt, past_user_inputs 1`] = ` +exports[`HfConversationalTaskOptions: $prompt, past_user_inputs 1`] = ` { "inputs": "mock-prompt", "past_user_inputs": [ @@ -78,7 +51,7 @@ exports[`HfConversationalTaskOptions: prompt, past_user_inputs 1`] = ` } `; -exports[`HfConversationalTaskOptions: prompt, past_user_inputs, generated_responses 1`] = ` +exports[`HfConversationalTaskOptions: $prompt, past_user_inputs, generated_responses 1`] = ` { "generated_responses": [ "response1", @@ -91,3 +64,30 @@ exports[`HfConversationalTaskOptions: prompt, past_user_inputs, generated_respon ], } `; + +exports[`HfConversationalTaskOptions: all options 1`] = ` +{ + "generated_responses": [ + "response1", + "response2", + ], + "inputs": "mock-prompt", + "options": { + "use_cache": true, + "wait_for_model": false, + }, + "parameters": { + "max_length": 100, + "max_time": 60, + "min_length": 10, + "repetition_penalty": 1.2, + "temperature": 0.7, + "top_k": 50, + "top_p": 0.9, + }, + "past_user_inputs": [ + "input1", + "input2", + ], +} +`; diff --git a/packages/core/src/apis/huggingface/__snapshots__/hfTextGenerationApi.spec.ts.snap b/packages/core/src/apis/huggingface/__snapshots__/hfTextGenerationApi.spec.ts.snap index a2425a3..d17e858 100644 --- a/packages/core/src/apis/huggingface/__snapshots__/hfTextGenerationApi.spec.ts.snap +++ b/packages/core/src/apis/huggingface/__snapshots__/hfTextGenerationApi.spec.ts.snap @@ -1,12 +1,18 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP -exports[`HfInferenceApi: all options 1`] = ` +exports[`HfInferenceApi: $prompt and options 1`] = ` { "inputs": "mock-prompt", "options": { "use_cache": true, "wait_for_model": false, }, +} +`; + +exports[`HfInferenceApi: $prompt and parameters 1`] = ` +{ + "inputs": "mock-prompt", "parameters": { "do_sample": true, "max_new_tokens": 100, @@ -21,33 +27,19 @@ exports[`HfInferenceApi: all options 1`] = ` } `; 
-exports[`HfInferenceApi: empty options 1`] = ` -{ - "inputs": "mock-prompt", - "options": {}, -} -`; - -exports[`HfInferenceApi: empty parameters 1`] = ` +exports[`HfInferenceApi: $prompt only 1`] = ` { "inputs": "mock-prompt", - "parameters": {}, } `; -exports[`HfInferenceApi: prompt and options 1`] = ` +exports[`HfInferenceApi: $prompt, parameters, and options 1`] = ` { "inputs": "mock-prompt", "options": { "use_cache": true, "wait_for_model": false, }, -} -`; - -exports[`HfInferenceApi: prompt and parameters 1`] = ` -{ - "inputs": "mock-prompt", "parameters": { "do_sample": true, "max_new_tokens": 100, @@ -62,13 +54,7 @@ exports[`HfInferenceApi: prompt and parameters 1`] = ` } `; -exports[`HfInferenceApi: prompt only 1`] = ` -{ - "inputs": "mock-prompt", -} -`; - -exports[`HfInferenceApi: prompt, parameters, and options 1`] = ` +exports[`HfInferenceApi: all options 1`] = ` { "inputs": "mock-prompt", "options": { @@ -88,3 +74,17 @@ exports[`HfInferenceApi: prompt, parameters, and options 1`] = ` }, } `; + +exports[`HfInferenceApi: empty options 1`] = ` +{ + "inputs": "mock-prompt", + "options": {}, +} +`; + +exports[`HfInferenceApi: empty parameters 1`] = ` +{ + "inputs": "mock-prompt", + "parameters": {}, +} +`; diff --git a/packages/core/src/apis/huggingface/hfConversationTaskApi.spec.ts b/packages/core/src/apis/huggingface/hfConversationTaskApi.spec.ts index b0c9ba3..01921a8 100644 --- a/packages/core/src/apis/huggingface/hfConversationTaskApi.spec.ts +++ b/packages/core/src/apis/huggingface/hfConversationTaskApi.spec.ts @@ -16,35 +16,35 @@ function render(context: Omit) { } describe("HfConversationalTaskOptions:", () => { - test("prompt", () => { + test("$prompt", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, past_user_inputs", () => { + test("$prompt, past_user_inputs", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", past_user_inputs: ["input1", "input2"], }); expect(rendered).toMatchSnapshot(); }); - test("prompt, generated_responses", () => { + test("$prompt, generated_responses", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", generated_responses: ["response1", "response2"], }); expect(rendered).toMatchSnapshot(); }); - test("prompt, past_user_inputs, generated_responses", () => { + test("$prompt, past_user_inputs, generated_responses", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", past_user_inputs: ["input1", "input2"], generated_responses: ["response1", "response2"], }); @@ -52,9 +52,9 @@ describe("HfConversationalTaskOptions:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, parameters", () => { + test("$prompt, parameters", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", parameters: { min_length: 10, max_length: 100, @@ -69,9 +69,9 @@ describe("HfConversationalTaskOptions:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, options", () => { + test("$prompt, options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", options: { use_cache: true, wait_for_model: false, @@ -83,7 +83,7 @@ describe("HfConversationalTaskOptions:", () => { test("all options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", past_user_inputs: ["input1", "input2"], generated_responses: ["response1", "response2"], parameters: { diff --git 
a/packages/core/src/apis/huggingface/hfConversationTaskApi.ts b/packages/core/src/apis/huggingface/hfConversationTaskApi.ts index ac846ab..d594d96 100644 --- a/packages/core/src/apis/huggingface/hfConversationTaskApi.ts +++ b/packages/core/src/apis/huggingface/hfConversationTaskApi.ts @@ -33,14 +33,14 @@ export interface HfConversationalTaskOptions extends HfInferenceApiOptions { */ export const HfConversationalTaskTemplate = new FnTemplate( ({ - prompt, + $prompt, past_user_inputs, generated_responses, parameters, options, }: HfConversationalTaskOptions) => { const rewritten = { - inputs: prompt, + inputs: $prompt, }; const result = { diff --git a/packages/core/src/apis/huggingface/hfTextGenerationApi.spec.ts b/packages/core/src/apis/huggingface/hfTextGenerationApi.spec.ts index 81b8ffe..ad98695 100644 --- a/packages/core/src/apis/huggingface/hfTextGenerationApi.spec.ts +++ b/packages/core/src/apis/huggingface/hfTextGenerationApi.spec.ts @@ -16,17 +16,17 @@ function render(context: Omit) { } describe("HfInferenceApi:", () => { - test("prompt only", () => { + test("$prompt only", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", }); expect(rendered).toMatchSnapshot(); }); - test("prompt and parameters", () => { + test("$prompt and parameters", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", parameters: { top_k: 50, top_p: 0.9, @@ -43,9 +43,9 @@ describe("HfInferenceApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt and options", () => { + test("$prompt and options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", options: { use_cache: true, wait_for_model: false, @@ -55,9 +55,9 @@ describe("HfInferenceApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, parameters, and options", () => { + test("$prompt, parameters, and options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", parameters: { top_k: 50, top_p: 0.9, @@ -80,7 +80,7 @@ describe("HfInferenceApi:", () => { test("empty parameters", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", parameters: {}, }); @@ -89,7 +89,7 @@ describe("HfInferenceApi:", () => { test("empty options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", options: {}, }); @@ -98,7 +98,7 @@ describe("HfInferenceApi:", () => { test("all options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", parameters: { top_k: 50, top_p: 0.9, diff --git a/packages/core/src/apis/huggingface/hfTextGenerationApi.ts b/packages/core/src/apis/huggingface/hfTextGenerationApi.ts index 92bf59e..523e360 100644 --- a/packages/core/src/apis/huggingface/hfTextGenerationApi.ts +++ b/packages/core/src/apis/huggingface/hfTextGenerationApi.ts @@ -31,9 +31,9 @@ export interface HfTextGenerationTaskOptions extends HfInferenceApiOptions { * @category Huggingface Text Generation Task */ export const HfTextGenerationTaskTemplate = new FnTemplate( - ({ prompt, parameters, options }: HfTextGenerationTaskOptions) => { + ({ $prompt, parameters, options }: HfTextGenerationTaskOptions) => { const rewritten = { - inputs: prompt, + inputs: $prompt, }; return JSON.stringify( diff --git a/packages/core/src/apis/meta/__snapshots__/llama2ChatApi.spec.ts.snap b/packages/core/src/apis/meta/__snapshots__/llama2ChatApi.spec.ts.snap index aef8580..de8ec13 100644 --- 
a/packages/core/src/apis/meta/__snapshots__/llama2ChatApi.spec.ts.snap +++ b/packages/core/src/apis/meta/__snapshots__/llama2ChatApi.spec.ts.snap @@ -1,31 +1,18 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP -exports[`Llama2ChatApi: all options 1`] = ` -{ - "max_gen_len": 1500, - "prompt": "[INST] <> -mock-system-text -<> - -mock-user-msg-1 [/INST] mock-assistant-msg-1 [INST] mock-user-msg-2 [/INST] mock-assistant-msg-2 [INST] mock-user-msg-3 [/INST] mock-assistant-msg-3 [INST] mock-prompt [/INST]", - "temperature": 0.7, - "top_p": 0.9, -} -`; - -exports[`Llama2ChatApi: prompt 1`] = ` +exports[`Llama2ChatApi: $prompt 1`] = ` { "prompt": "[INST] mock-prompt [/INST]", } `; -exports[`Llama2ChatApi: prompt, examplePairs 1`] = ` +exports[`Llama2ChatApi: $prompt, examplePairs 1`] = ` { "prompt": "[INST] mock-user-msg-1 [/INST] mock-assistant-msg-1 [INST] mock-user-msg-2 [/INST] mock-assistant-msg-2 [INST] mock-prompt [/INST]", } `; -exports[`Llama2ChatApi: prompt, examplePairs, system 1`] = ` +exports[`Llama2ChatApi: $prompt, examplePairs, system 1`] = ` { "prompt": "[INST] <> mock-system-text @@ -35,7 +22,7 @@ mock-user-msg-1 [/INST] mock-assistant-msg-1 [INST] mock-prompt [/INST]", } `; -exports[`Llama2ChatApi: prompt, system 1`] = ` +exports[`Llama2ChatApi: $prompt, system 1`] = ` { "prompt": "[INST] <> mock-system-text @@ -44,3 +31,16 @@ mock-system-text mock-prompt [/INST]", } `; + +exports[`Llama2ChatApi: all options 1`] = ` +{ + "max_gen_len": 1500, + "prompt": "[INST] <> +mock-system-text +<> + +mock-user-msg-1 [/INST] mock-assistant-msg-1 [INST] mock-user-msg-2 [/INST] mock-assistant-msg-2 [INST] mock-user-msg-3 [/INST] mock-assistant-msg-3 [INST] mock-prompt [/INST]", + "temperature": 0.7, + "top_p": 0.9, +} +`; diff --git a/packages/core/src/apis/meta/__snapshots__/llama3ChatApi.spec.ts.snap b/packages/core/src/apis/meta/__snapshots__/llama3ChatApi.spec.ts.snap index 4783ffe..e482d2e 100644 --- a/packages/core/src/apis/meta/__snapshots__/llama3ChatApi.spec.ts.snap +++ b/packages/core/src/apis/meta/__snapshots__/llama3ChatApi.spec.ts.snap @@ -1,11 +1,16 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP -exports[`Llama3ChatApi: all options 1`] = ` +exports[`Llama3ChatApi: $prompt 1`] = ` { - "max_gen_len": 512, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|> + "prompt": "<|begin_of_text|><|start_header_id|>user<|end_header_id|> -mock-system-text<|eot_id|><|start_header_id|>user<|end_header_id|> +mock-prompt<|eot_id|><|start_header_id|>assistant<|end_header_id|>", +} +`; + +exports[`Llama3ChatApi: $prompt, examplePairs 1`] = ` +{ + "prompt": "<|begin_of_text|><|start_header_id|>user<|end_header_id|> mock-user-msg-1<|eot_id|><|start_header_id|>assistant<|end_header_id|> @@ -16,22 +21,24 @@ mock-user-msg-2<|eot_id|><|start_header_id|>assistant<|end_header_id|> mock-assistant-msg-2<|eot_id|><|start_header_id|>user<|end_header_id|> mock-prompt<|eot_id|><|start_header_id|>assistant<|end_header_id|>", - "temperature": 0.7, - "top_p": 0.9, } `; -exports[`Llama3ChatApi: prompt 1`] = ` +exports[`Llama3ChatApi: $prompt, system 1`] = ` { - "prompt": "<|begin_of_text|><|start_header_id|>user<|end_header_id|> + "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|> + +mock-system-text<|eot_id|><|start_header_id|>user<|end_header_id|> mock-prompt<|eot_id|><|start_header_id|>assistant<|end_header_id|>", } `; -exports[`Llama3ChatApi: prompt, examplePairs 1`] = ` +exports[`Llama3ChatApi: $prompt, system, examplePairs 1`] = ` { - "prompt": 
"<|begin_of_text|><|start_header_id|>user<|end_header_id|> + "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|> + +mock-system-text<|eot_id|><|start_header_id|>user<|end_header_id|> mock-user-msg-1<|eot_id|><|start_header_id|>assistant<|end_header_id|> @@ -45,18 +52,9 @@ mock-prompt<|eot_id|><|start_header_id|>assistant<|end_header_id|>", } `; -exports[`Llama3ChatApi: prompt, system 1`] = ` -{ - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|> - -mock-system-text<|eot_id|><|start_header_id|>user<|end_header_id|> - -mock-prompt<|eot_id|><|start_header_id|>assistant<|end_header_id|>", -} -`; - -exports[`Llama3ChatApi: prompt, system, examplePairs 1`] = ` +exports[`Llama3ChatApi: all options 1`] = ` { + "max_gen_len": 512, "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|> mock-system-text<|eot_id|><|start_header_id|>user<|end_header_id|> @@ -70,5 +68,7 @@ mock-user-msg-2<|eot_id|><|start_header_id|>assistant<|end_header_id|> mock-assistant-msg-2<|eot_id|><|start_header_id|>user<|end_header_id|> mock-prompt<|eot_id|><|start_header_id|>assistant<|end_header_id|>", + "temperature": 0.7, + "top_p": 0.9, } `; diff --git a/packages/core/src/apis/meta/llama2ChatApi.spec.ts b/packages/core/src/apis/meta/llama2ChatApi.spec.ts index d67cb14..f211e14 100644 --- a/packages/core/src/apis/meta/llama2ChatApi.spec.ts +++ b/packages/core/src/apis/meta/llama2ChatApi.spec.ts @@ -17,17 +17,17 @@ describe("Llama2ChatApi:", () => { * FewShotRequestOptions (prompt, examplePairs, system): */ - test("prompt", () => { + test("$prompt", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs", () => { + test("$prompt, examplePairs", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, { user: "mock-user-msg-2", assistant: "mock-assistant-msg-2" }, @@ -37,18 +37,18 @@ describe("Llama2ChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, system", () => { + test("$prompt, system", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, system", () => { + test("$prompt, examplePairs, system", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -64,7 +64,7 @@ describe("Llama2ChatApi:", () => { test("all options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, { user: "mock-user-msg-2", assistant: "mock-assistant-msg-2" }, diff --git a/packages/core/src/apis/meta/llama2ChatApi.ts b/packages/core/src/apis/meta/llama2ChatApi.ts index aa9f83d..e5605ed 100644 --- a/packages/core/src/apis/meta/llama2ChatApi.ts +++ b/packages/core/src/apis/meta/llama2ChatApi.ts @@ -7,8 +7,6 @@ import type { FewShotRequestOptions } from "../shared"; import { isLlamaResponse, LlamaResponse } from "./llama"; -export const Llama2ChatMlTemplateSource = `[INST] <% if (typeof system !== 'undefined') { %><>\\n<%= system %>\\n<>\\n\\n<% } %><% (typeof examplePairs !== 'undefined' ? 
examplePairs : []).forEach(pair => { %><%= pair.user %> [/INST] <%= pair.assistant %> [INST] <% }) %><%= prompt %> [/INST]`; - /** * @category Requests * @category Llama2 @@ -28,7 +26,7 @@ export interface Llama2ChatOptions */ export const Llama2ChatTemplate = new FnTemplate( ({ - prompt, + $prompt, system, examplePairs, temperature, @@ -44,7 +42,7 @@ export const Llama2ChatTemplate = new FnTemplate( `${pair.user} [/INST] ${pair.assistant} [INST] `, ]) : []), - `${prompt} [/INST]`, + `${$prompt} [/INST]`, ].join(""), }; diff --git a/packages/core/src/apis/meta/llama3ChatApi.spec.ts b/packages/core/src/apis/meta/llama3ChatApi.spec.ts index cecfa48..650c94a 100644 --- a/packages/core/src/apis/meta/llama3ChatApi.spec.ts +++ b/packages/core/src/apis/meta/llama3ChatApi.spec.ts @@ -17,26 +17,26 @@ describe("Llama3ChatApi:", () => { * FewShotRequestOptions (prompt, examplePairs, system): */ - test("prompt", () => { + test("$prompt", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, system", () => { + test("$prompt, system", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs", () => { + test("$prompt, examplePairs", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, { user: "mock-user-msg-2", assistant: "mock-assistant-msg-2" }, @@ -46,9 +46,9 @@ describe("Llama3ChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, system, examplePairs", () => { + test("$prompt, system, examplePairs", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, @@ -65,7 +65,7 @@ describe("Llama3ChatApi:", () => { test("all options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, diff --git a/packages/core/src/apis/meta/llama3ChatApi.ts b/packages/core/src/apis/meta/llama3ChatApi.ts index dfcbbcc..4d32d2e 100644 --- a/packages/core/src/apis/meta/llama3ChatApi.ts +++ b/packages/core/src/apis/meta/llama3ChatApi.ts @@ -25,7 +25,7 @@ export interface Llama3ChatOptions */ export const Llama3ChatTemplate = new FnTemplate( ({ - prompt, + $prompt, system, examplePairs, temperature, @@ -42,7 +42,7 @@ export const Llama3ChatTemplate = new FnTemplate( `<|start_header_id|>assistant<|end_header_id|>\n\n${pair.assistant}<|eot_id|>`, ]) : []), - `<|start_header_id|>user<|end_header_id|>\n\n${prompt}<|eot_id|>`, + `<|start_header_id|>user<|end_header_id|>\n\n${$prompt}<|eot_id|>`, `<|start_header_id|>assistant<|end_header_id|>`, ].join(""); diff --git a/packages/core/src/apis/mistral/__snapshots__/mistralAiApi.spec.ts.snap b/packages/core/src/apis/mistral/__snapshots__/mistralAiApi.spec.ts.snap index 0d6bd69..f712302 100644 --- a/packages/core/src/apis/mistral/__snapshots__/mistralAiApi.spec.ts.snap +++ b/packages/core/src/apis/mistral/__snapshots__/mistralAiApi.spec.ts.snap @@ -1,40 +1,6 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP -exports[`MistralAiApi: all options 1`] = ` -{ - "max_tokens": 1000, - "messages": [ - { - "content": "mock-system-text", - "role": "system", - }, - { - "content": 
"mock-user-msg-1", - "role": "user", - }, - { - "content": "mock-assistant-msg-1", - "role": "assistant", - }, - { - "content": "mock-assistant-text", - "role": "assistant", - }, - { - "content": "mock-prompt", - "role": "user", - }, - ], - "model": "mock-model-id", - "random_seed": 1234, - "safe_prompt": true, - "stream": true, - "temperature": 0.7, - "top_p": 0.9, -} -`; - -exports[`MistralAiApi: prompt 1`] = ` +exports[`MistralAiApi: $prompt 1`] = ` { "messages": [ { @@ -46,7 +12,7 @@ exports[`MistralAiApi: prompt 1`] = ` } `; -exports[`MistralAiApi: prompt 2`] = ` +exports[`MistralAiApi: $prompt 2`] = ` { "messages": [ { @@ -58,7 +24,7 @@ exports[`MistralAiApi: prompt 2`] = ` } `; -exports[`MistralAiApi: prompt, examplePairs 1`] = ` +exports[`MistralAiApi: $prompt, examplePairs 1`] = ` { "messages": [ { @@ -86,7 +52,7 @@ exports[`MistralAiApi: prompt, examplePairs 1`] = ` } `; -exports[`MistralAiApi: prompt, examplePairs, messages 1`] = ` +exports[`MistralAiApi: $prompt, examplePairs, messages 1`] = ` { "messages": [ { @@ -110,7 +76,7 @@ exports[`MistralAiApi: prompt, examplePairs, messages 1`] = ` } `; -exports[`MistralAiApi: prompt, examplePairs, system 1`] = ` +exports[`MistralAiApi: $prompt, examplePairs, system 1`] = ` { "messages": [ { @@ -134,7 +100,7 @@ exports[`MistralAiApi: prompt, examplePairs, system 1`] = ` } `; -exports[`MistralAiApi: prompt, examplePairs, system, messages 1`] = ` +exports[`MistralAiApi: $prompt, examplePairs, system, messages 1`] = ` { "messages": [ { @@ -162,7 +128,7 @@ exports[`MistralAiApi: prompt, examplePairs, system, messages 1`] = ` } `; -exports[`MistralAiApi: prompt, messages 1`] = ` +exports[`MistralAiApi: $prompt, messages 1`] = ` { "messages": [ { @@ -186,7 +152,7 @@ exports[`MistralAiApi: prompt, messages 1`] = ` } `; -exports[`MistralAiApi: prompt, system 1`] = ` +exports[`MistralAiApi: $prompt, system 1`] = ` { "messages": [ { @@ -202,7 +168,7 @@ exports[`MistralAiApi: prompt, system 1`] = ` } `; -exports[`MistralAiApi: prompt, system, messages 1`] = ` +exports[`MistralAiApi: $prompt, system, messages 1`] = ` { "messages": [ { @@ -229,3 +195,37 @@ exports[`MistralAiApi: prompt, system, messages 1`] = ` "model": "mock-model-id", } `; + +exports[`MistralAiApi: all options 1`] = ` +{ + "max_tokens": 1000, + "messages": [ + { + "content": "mock-system-text", + "role": "system", + }, + { + "content": "mock-user-msg-1", + "role": "user", + }, + { + "content": "mock-assistant-msg-1", + "role": "assistant", + }, + { + "content": "mock-assistant-text", + "role": "assistant", + }, + { + "content": "mock-prompt", + "role": "user", + }, + ], + "model": "mock-model-id", + "random_seed": 1234, + "safe_prompt": true, + "stream": true, + "temperature": 0.7, + "top_p": 0.9, +} +`; diff --git a/packages/core/src/apis/mistral/__snapshots__/mistralBedrockApi.spec.ts.snap b/packages/core/src/apis/mistral/__snapshots__/mistralBedrockApi.spec.ts.snap index 28e94e2..b4b3709 100644 --- a/packages/core/src/apis/mistral/__snapshots__/mistralBedrockApi.spec.ts.snap +++ b/packages/core/src/apis/mistral/__snapshots__/mistralBedrockApi.spec.ts.snap @@ -1,35 +1,18 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP -exports[`MistralBedrockApi: all options 1`] = ` -{ - "max_tokens": 1000, - "prompt": "[INST] <> -mock-system-text -<> - -mock-user-msg-1 [/INST] mock-assistant-msg-1 [INST] mock-prompt [/INST]", - "stop": [ - "mock-stop-1", - ], - "temperature": 0.7, - "top_k": 50, - "top_p": 0.9, -} -`; - -exports[`MistralBedrockApi: prompt 1`] = ` +exports[`MistralBedrockApi: 
$prompt 1`] = ` { "prompt": "[INST] mock-prompt [/INST]", } `; -exports[`MistralBedrockApi: prompt, examplePairs 1`] = ` +exports[`MistralBedrockApi: $prompt, examplePairs 1`] = ` { "prompt": "[INST] mock-user-msg-1 [/INST] mock-assistant-msg-1 [INST] mock-user-msg-2 [/INST] mock-assistant-msg-2 [INST] mock-prompt [/INST]", } `; -exports[`MistralBedrockApi: prompt, examplePairs, system 1`] = ` +exports[`MistralBedrockApi: $prompt, examplePairs, system 1`] = ` { "prompt": "[INST] <> mock-system-text @@ -39,7 +22,7 @@ mock-user-msg-1 [/INST] mock-assistant-msg-1 [INST] mock-user-msg-2 [/INS } `; -exports[`MistralBedrockApi: prompt, system 1`] = ` +exports[`MistralBedrockApi: $prompt, system 1`] = ` { "prompt": "[INST] <> mock-system-text @@ -48,3 +31,20 @@ mock-system-text mock-prompt [/INST]", } `; + +exports[`MistralBedrockApi: all options 1`] = ` +{ + "max_tokens": 1000, + "prompt": "[INST] <> +mock-system-text +<> + +mock-user-msg-1 [/INST] mock-assistant-msg-1 [INST] mock-prompt [/INST]", + "stop": [ + "mock-stop-1", + ], + "temperature": 0.7, + "top_k": 50, + "top_p": 0.9, +} +`; diff --git a/packages/core/src/apis/mistral/mistralAiApi.spec.ts b/packages/core/src/apis/mistral/mistralAiApi.spec.ts index 8db577a..6317b74 100644 --- a/packages/core/src/apis/mistral/mistralAiApi.spec.ts +++ b/packages/core/src/apis/mistral/mistralAiApi.spec.ts @@ -14,12 +14,12 @@ function render(context: Omit) { describe("MistralAiApi:", () => { /** - * FewShotRequestOptions (prompt, examplePairs, system): + * FewShotRequestOptions ($prompt, examplePairs, system): */ - test("prompt", () => { + test("$prompt", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", }); expect(rendered).toMatchSnapshot(); @@ -27,9 +27,9 @@ describe("MistralAiApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs", () => { + test("$prompt, examplePairs", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, { user: "mock-user-msg-2", assistant: "mock-assistant-msg-2" }, @@ -39,18 +39,18 @@ describe("MistralAiApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, system", () => { + test("$prompt, system", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, system", () => { + test("$prompt, examplePairs, system", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -64,9 +64,9 @@ describe("MistralAiApi:", () => { * "Native" few shot options (messages): */ - test("prompt, messages", () => { + test("$prompt, messages", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", messages: [ { role: "assistant", @@ -86,9 +86,9 @@ describe("MistralAiApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, messages", () => { + test("$prompt, examplePairs, messages", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -103,9 +103,9 @@ describe("MistralAiApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, system, messages", () => { + test("$prompt, system, messages", () => { const rendered = render({ - prompt: 
"mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", messages: [ { @@ -126,9 +126,9 @@ describe("MistralAiApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, system, messages", () => { + test("$prompt, examplePairs, system, messages", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -150,7 +150,7 @@ describe("MistralAiApi:", () => { test("all options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], diff --git a/packages/core/src/apis/mistral/mistralAiApi.ts b/packages/core/src/apis/mistral/mistralAiApi.ts index 68a0a26..f9db756 100644 --- a/packages/core/src/apis/mistral/mistralAiApi.ts +++ b/packages/core/src/apis/mistral/mistralAiApi.ts @@ -35,7 +35,7 @@ export interface MistralAiOptions export const MistralAiTemplate = new FnTemplate( ({ modelId, - prompt, + $prompt, system, examplePairs, messages, @@ -62,7 +62,7 @@ export const MistralAiTemplate = new FnTemplate( content: message.content, })) : []), - { role: "user", content: prompt }, + { role: "user", content: $prompt }, ], }; diff --git a/packages/core/src/apis/mistral/mistralBedrockApi.spec.ts b/packages/core/src/apis/mistral/mistralBedrockApi.spec.ts index 44dc2d0..14fc2a6 100644 --- a/packages/core/src/apis/mistral/mistralBedrockApi.spec.ts +++ b/packages/core/src/apis/mistral/mistralBedrockApi.spec.ts @@ -14,20 +14,20 @@ function render(context: Omit) { describe("MistralBedrockApi:", () => { /** - * FewShotRequestOptions (prompt, examplePairs, system): + * FewShotRequestOptions ($prompt, examplePairs, system): */ - test("prompt", () => { + test("$prompt", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs", () => { + test("$prompt, examplePairs", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, { user: "mock-user-msg-2", assistant: "mock-assistant-msg-2" }, @@ -37,18 +37,18 @@ describe("MistralBedrockApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, system", () => { + test("$prompt, system", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, system", () => { + test("$prompt, examplePairs, system", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, { user: "mock-user-msg-2", assistant: "mock-assistant-msg-2" }, @@ -65,7 +65,7 @@ describe("MistralBedrockApi:", () => { test("all options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -79,30 +79,4 @@ describe("MistralBedrockApi:", () => { expect(rendered).toMatchSnapshot(); }); - - /** - * Edge cases: - */ - - // test("prompt with special characters", () => { - // const rendered = render({ - // prompt: 'mock-prompt with special characters: "\n \t \\"', - // }); - - // expect(rendered).toMatchSnapshot(); - // }); - - // test("examplePairs and prompt with special characters", () => { - // 
const rendered = render({ - // prompt: 'mock-prompt with special characters: "\n \t \\"', - // examplePairs: [ - // { - // user: 'mock-user-msg with special characters: "\n \t \\"', - // assistant: 'mock-assistant-msg with special characters: "\n \t \\"', - // }, - // ], - // }); - - // expect(rendered).toMatchSnapshot(); - // }); }); diff --git a/packages/core/src/apis/mistral/mistralBedrockApi.ts b/packages/core/src/apis/mistral/mistralBedrockApi.ts index 8b28f66..3bed684 100644 --- a/packages/core/src/apis/mistral/mistralBedrockApi.ts +++ b/packages/core/src/apis/mistral/mistralBedrockApi.ts @@ -29,7 +29,7 @@ export interface MistralBedrockOptions */ export const MistralBedrockTemplate = new FnTemplate( ({ - prompt, + $prompt, system, examplePairs, max_tokens, @@ -47,7 +47,7 @@ export const MistralBedrockTemplate = new FnTemplate( `${pair.user} [/INST] ${pair.assistant} [INST] `, ]) : []), - `${prompt} [/INST]`, + `${$prompt} [/INST]`, ].join(""); const rewritten = { diff --git a/packages/core/src/apis/openai/__snapshots__/openAiChatApi.spec.ts.snap b/packages/core/src/apis/openai/__snapshots__/openAiChatApi.spec.ts.snap index 9611251..ab1f5d6 100644 --- a/packages/core/src/apis/openai/__snapshots__/openAiChatApi.spec.ts.snap +++ b/packages/core/src/apis/openai/__snapshots__/openAiChatApi.spec.ts.snap @@ -1,99 +1,6 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP -exports[`OpenAiChatApi: all options 1`] = ` -{ - "frequency_penalty": 0.5, - "function_call": "auto", - "functions": [ - { - "description": "mock-description", - "name": "mock-function", - "parameters": { - "properties": { - "key": { - "type": "string", - }, - }, - "type": "object", - }, - }, - ], - "logit_bias": { - "50256": -100, - }, - "logprobs": true, - "max_tokens": 1000, - "messages": [ - { - "content": "mock-system-text", - "role": "system", - }, - { - "content": "mock-user-msg-1", - "role": "user", - }, - { - "content": "mock-assistant-msg-1", - "role": "assistant", - }, - { - "content": "mock-assistant-text", - "function_call": { - "arguments": "{ "key": "value" }", - "name": "mock-function", - }, - "role": "assistant", - }, - { - "content": "mock-prompt", - "role": "user", - }, - ], - "model": "mock-model-id", - "n": 3, - "presence_penalty": 0.3, - "response_format": { - "type": "json_object", - }, - "seed": 1234, - "stop": [ - "mock-stop", - ], - "stream": true, - "stream_options": { - "include_usage": true, - }, - "temperature": 0.7, - "tool_choice": { - "function": { - "name": "mock-tool-choice-function", - }, - "type": "function", - }, - "tools": [ - { - "function": { - "description": "mock-description", - "name": "mock-tool-function", - "parameters": { - "properties": { - "key": { - "type": "string", - }, - }, - "type": "object", - }, - }, - "type": "function", - }, - ], - "top_logprobs": 5, - "top_p": 0.9, - "user": "mock-user", -} -`; - -exports[`OpenAiChatApi: prompt 1`] = ` +exports[`OpenAiChatApi: $prompt 1`] = ` { "messages": [ { @@ -105,7 +12,7 @@ exports[`OpenAiChatApi: prompt 1`] = ` } `; -exports[`OpenAiChatApi: prompt, examplePairs 1`] = ` +exports[`OpenAiChatApi: $prompt, examplePairs 1`] = ` { "messages": [ { @@ -133,7 +40,7 @@ exports[`OpenAiChatApi: prompt, examplePairs 1`] = ` } `; -exports[`OpenAiChatApi: prompt, examplePairs, messages 1`] = ` +exports[`OpenAiChatApi: $prompt, examplePairs, messages 1`] = ` { "messages": [ { @@ -157,7 +64,7 @@ exports[`OpenAiChatApi: prompt, examplePairs, messages 1`] = ` } `; -exports[`OpenAiChatApi: prompt, examplePairs, system 1`] = ` +exports[`OpenAiChatApi: 
$prompt, examplePairs, system 1`] = ` { "messages": [ { @@ -181,7 +88,7 @@ exports[`OpenAiChatApi: prompt, examplePairs, system 1`] = ` } `; -exports[`OpenAiChatApi: prompt, examplePairs, system, messages 1`] = ` +exports[`OpenAiChatApi: $prompt, examplePairs, system, messages 1`] = ` { "messages": [ { @@ -209,7 +116,7 @@ exports[`OpenAiChatApi: prompt, examplePairs, system, messages 1`] = ` } `; -exports[`OpenAiChatApi: prompt, functions 1`] = ` +exports[`OpenAiChatApi: $prompt, functions 1`] = ` { "functions": [ { @@ -235,7 +142,7 @@ exports[`OpenAiChatApi: prompt, functions 1`] = ` } `; -exports[`OpenAiChatApi: prompt, messages 1`] = ` +exports[`OpenAiChatApi: $prompt, messages 1`] = ` { "messages": [ { @@ -259,7 +166,7 @@ exports[`OpenAiChatApi: prompt, messages 1`] = ` } `; -exports[`OpenAiChatApi: prompt, messages with function_call 1`] = ` +exports[`OpenAiChatApi: $prompt, messages with function_call 1`] = ` { "messages": [ { @@ -279,7 +186,7 @@ exports[`OpenAiChatApi: prompt, messages with function_call 1`] = ` } `; -exports[`OpenAiChatApi: prompt, system 1`] = ` +exports[`OpenAiChatApi: $prompt, system 1`] = ` { "messages": [ { @@ -295,7 +202,7 @@ exports[`OpenAiChatApi: prompt, system 1`] = ` } `; -exports[`OpenAiChatApi: prompt, system, messages 1`] = ` +exports[`OpenAiChatApi: $prompt, system, messages 1`] = ` { "messages": [ { @@ -323,7 +230,7 @@ exports[`OpenAiChatApi: prompt, system, messages 1`] = ` } `; -exports[`OpenAiChatApi: prompt, tool_choice 1`] = ` +exports[`OpenAiChatApi: $prompt, tool_choice 1`] = ` { "messages": [ { @@ -341,7 +248,7 @@ exports[`OpenAiChatApi: prompt, tool_choice 1`] = ` } `; -exports[`OpenAiChatApi: prompt, tools 1`] = ` +exports[`OpenAiChatApi: $prompt, tools 1`] = ` { "messages": [ { @@ -369,3 +276,96 @@ exports[`OpenAiChatApi: prompt, tools 1`] = ` ], } `; + +exports[`OpenAiChatApi: all options 1`] = ` +{ + "frequency_penalty": 0.5, + "function_call": "auto", + "functions": [ + { + "description": "mock-description", + "name": "mock-function", + "parameters": { + "properties": { + "key": { + "type": "string", + }, + }, + "type": "object", + }, + }, + ], + "logit_bias": { + "50256": -100, + }, + "logprobs": true, + "max_tokens": 1000, + "messages": [ + { + "content": "mock-system-text", + "role": "system", + }, + { + "content": "mock-user-msg-1", + "role": "user", + }, + { + "content": "mock-assistant-msg-1", + "role": "assistant", + }, + { + "content": "mock-assistant-text", + "function_call": { + "arguments": "{ "key": "value" }", + "name": "mock-function", + }, + "role": "assistant", + }, + { + "content": "mock-prompt", + "role": "user", + }, + ], + "model": "mock-model-id", + "n": 3, + "presence_penalty": 0.3, + "response_format": { + "type": "json_object", + }, + "seed": 1234, + "stop": [ + "mock-stop", + ], + "stream": true, + "stream_options": { + "include_usage": true, + }, + "temperature": 0.7, + "tool_choice": { + "function": { + "name": "mock-tool-choice-function", + }, + "type": "function", + }, + "tools": [ + { + "function": { + "description": "mock-description", + "name": "mock-tool-function", + "parameters": { + "properties": { + "key": { + "type": "string", + }, + }, + "type": "object", + }, + }, + "type": "function", + }, + ], + "top_logprobs": 5, + "top_p": 0.9, + "user": "mock-user", +} +`; diff --git a/packages/core/src/apis/openai/openAiChatApi.spec.ts b/packages/core/src/apis/openai/openAiChatApi.spec.ts index 1964963..21eb406 100644 --- a/packages/core/src/apis/openai/openAiChatApi.spec.ts +++ 
b/packages/core/src/apis/openai/openAiChatApi.spec.ts @@ -17,17 +17,17 @@ describe("OpenAiChatApi:", () => { * FewShotRequestOptions (prompt, examplePairs, system): */ - test("prompt", () => { + test("$prompt", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs", () => { + test("$prompt, examplePairs", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, { user: "mock-user-msg-2", assistant: "mock-assistant-msg-2" }, @@ -37,18 +37,18 @@ describe("OpenAiChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, system", () => { + test("$prompt, system", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", }); expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, system", () => { + test("$prompt, examplePairs, system", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -62,9 +62,9 @@ describe("OpenAiChatApi:", () => { * "Native" few shot options (messages): */ - test("prompt, messages", () => { + test("$prompt, messages", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", messages: [ { role: "assistant", @@ -84,9 +84,9 @@ describe("OpenAiChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, messages", () => { + test("$prompt, examplePairs, messages", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -101,9 +101,9 @@ describe("OpenAiChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, system, messages", () => { + test("$prompt, system, messages", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", system: "mock-system-text", messages: [ { @@ -124,9 +124,9 @@ describe("OpenAiChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, examplePairs, system, messages", () => { + test("$prompt, examplePairs, system, messages", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], @@ -146,9 +146,9 @@ describe("OpenAiChatApi:", () => { * Tool-related: */ - test("prompt, messages with function_call", () => { + test("$prompt, messages with function_call", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", messages: [ { role: "assistant", @@ -164,9 +164,9 @@ describe("OpenAiChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, tools", () => { + test("$prompt, tools", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", tools: [ { type: "function", @@ -187,9 +187,9 @@ describe("OpenAiChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, tool_choice", () => { + test("$prompt, tool_choice", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", tool_choice: { type: "function", function: { @@ -201,9 +201,9 @@ describe("OpenAiChatApi:", () => { expect(rendered).toMatchSnapshot(); }); - test("prompt, functions", () => { + test("$prompt, functions", () => { const rendered = render({ - 
prompt: "mock-prompt", + $prompt: "mock-prompt", functions: [ { name: "mock-function", @@ -227,7 +227,7 @@ describe("OpenAiChatApi:", () => { test("all options", () => { const rendered = render({ - prompt: "mock-prompt", + $prompt: "mock-prompt", examplePairs: [ { user: "mock-user-msg-1", assistant: "mock-assistant-msg-1" }, ], diff --git a/packages/core/src/apis/openai/openAiChatApi.ts b/packages/core/src/apis/openai/openAiChatApi.ts index 15a1ffe..b10fd8a 100644 --- a/packages/core/src/apis/openai/openAiChatApi.ts +++ b/packages/core/src/apis/openai/openAiChatApi.ts @@ -94,7 +94,7 @@ export interface OpenAiChatOptions export const OpenAiChatTemplate = new FnTemplate( ({ modelId, - prompt, + $prompt, system, examplePairs, messages, @@ -142,7 +142,7 @@ export const OpenAiChatTemplate = new FnTemplate( : {}), })) : []), - { role: "user", content: prompt }, + { role: "user", content: $prompt }, ], }; diff --git a/packages/core/src/apis/shared/FewShotRequestOptions.ts b/packages/core/src/apis/shared/FewShotRequestOptions.ts index ca51cfe..15d8fc1 100644 --- a/packages/core/src/apis/shared/FewShotRequestOptions.ts +++ b/packages/core/src/apis/shared/FewShotRequestOptions.ts @@ -2,7 +2,7 @@ * @category Core Interfaces */ export interface FewShotRequestOptions { - prompt: string; + $prompt: string; system?: string; examplePairs?: { user: string; assistant: string }[]; } diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index da336ca..1f85049 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -79,7 +79,7 @@ export { createLmStudioModelProvider, createMistralModelProvider, createOpenAiChatModelProvider, - AwsBedrockAuthConfig as AwsAuthConfig, // TODO ??? + AwsBedrockAuthConfig, CohereAuthConfig, GroqAuthConfig, HuggingfaceAuthConfig, diff --git a/packages/core/src/providers/aws_bedrock/AwsBedrockModelProvider.spec.ts b/packages/core/src/providers/aws_bedrock/AwsBedrockModelProvider.spec.ts index 7f7323e..01ed01a 100644 --- a/packages/core/src/providers/aws_bedrock/AwsBedrockModelProvider.spec.ts +++ b/packages/core/src/providers/aws_bedrock/AwsBedrockModelProvider.spec.ts @@ -51,7 +51,7 @@ describe("AwsBedrockModelProvider", () => { // act const result = await provider.sendRequest({ modelId: "dummy-request-model-id", - prompt: "explain aws bedrock:", + $prompt: "explain aws bedrock:", }); // assert @@ -109,7 +109,7 @@ describe("AwsBedrockModelProvider", () => { // act const result = await provider.sendRequest({ modelId: "dummy-request-model-id", - prompt: "explain aws bedrock:", + $prompt: "explain aws bedrock:", }); // assert @@ -162,7 +162,7 @@ describe("AwsBedrockModelProvider", () => { await expect( provider.sendRequest({ modelId: "dummy-request-model-id", - prompt: "explain aws bedrock:", + $prompt: "explain aws bedrock:", }), ).rejects.toThrow("Signing failed"); }); diff --git a/packages/core/src/providers/aws_bedrock/createAwsBedrockModelProvider.ts b/packages/core/src/providers/aws_bedrock/createAwsBedrockModelProvider.ts index 6c7d834..11c8fbc 100644 --- a/packages/core/src/providers/aws_bedrock/createAwsBedrockModelProvider.ts +++ b/packages/core/src/providers/aws_bedrock/createAwsBedrockModelProvider.ts @@ -47,7 +47,7 @@ type AwsBedrockApi = * }); * * const response = await titanText.sendRequest({ - * prompt: "Brief history of NY Mets:" + * $prompt: "Brief history of NY Mets:" * // all other options for the specified `api` available here * }); * @@ -129,7 +129,7 @@ type AwsBedrockApi = * modelId: "ai21.j2-mid-v1", * }); * - * const params = { 
prompt: "Brief history of NY Mets:" }; + * const params = { $prompt: "Brief history of NY Mets:" }; * * const responses = await Promise.all([ * titanText.sendRequest(params), diff --git a/packages/core/src/providers/baseModelProvider.spec.ts b/packages/core/src/providers/baseModelProvider.spec.ts index 47be2ae..8a4d453 100644 --- a/packages/core/src/providers/baseModelProvider.spec.ts +++ b/packages/core/src/providers/baseModelProvider.spec.ts @@ -28,13 +28,13 @@ describe("BaseModelProvider", () => { }); // act - await provider.sendRequest({ prompt: "the ny mets are:" }); + await provider.sendRequest({ $prompt: "the ny mets are:" }); // assert expect(provider.dispatchRequest).toHaveBeenCalledWith( { modelId: "dummy-configured-model-id", - prompt: "the ny mets are:", + $prompt: "the ny mets are:", }, undefined, ); @@ -52,14 +52,14 @@ describe("BaseModelProvider", () => { // act await provider.sendRequest({ modelId: "dummy-request-model-id", - prompt: "the ny mets are:", + $prompt: "the ny mets are:", }); // assert expect(provider.dispatchRequest).toHaveBeenCalledWith( { modelId: "dummy-request-model-id", - prompt: "the ny mets are:", + $prompt: "the ny mets are:", }, undefined, ); @@ -78,7 +78,7 @@ describe("BaseModelProvider", () => { // act & assert await expect( - provider.sendRequest({ prompt: "the ny mets are:" }), + provider.sendRequest({ $prompt: "the ny mets are:" }), ).rejects.toThrow("Unexpected response from model provider"); }); @@ -99,7 +99,7 @@ describe("BaseModelProvider", () => { await provider.sendRequest( { modelId: "dummy-request-model-id", - prompt: "the ny mets are:", + $prompt: "the ny mets are:", }, { metaProp: "dummy-meta", @@ -111,7 +111,7 @@ describe("BaseModelProvider", () => { expect(provider.history[0]).toEqual({ options: { modelId: "dummy-request-model-id", - prompt: "the ny mets are:", + $prompt: "the ny mets are:", }, response: { data: "some valid response" }, meta: { @@ -122,7 +122,7 @@ describe("BaseModelProvider", () => { // act again await provider.sendRequest({ modelId: "dummy-request-model-id-2", - prompt: "the ny yankees are:", + $prompt: "the ny yankees are:", }); // assert again @@ -130,7 +130,7 @@ describe("BaseModelProvider", () => { expect(provider.history[0]).toEqual({ options: { modelId: "dummy-request-model-id", - prompt: "the ny mets are:", + $prompt: "the ny mets are:", }, response: { data: "some valid response" }, meta: { @@ -140,7 +140,7 @@ describe("BaseModelProvider", () => { expect(provider.history[1]).toEqual({ options: { modelId: "dummy-request-model-id-2", - prompt: "the ny yankees are:", + $prompt: "the ny yankees are:", }, response: { data: "some valid response" }, meta: undefined, // no meta in second call @@ -161,7 +161,7 @@ describe("BaseModelProvider", () => { provider.sendRequest( { modelId: "dummy-request-model-id", - prompt: "the ny mets are:", + $prompt: "the ny mets are:", }, { metaProp: "dummy-meta", @@ -173,7 +173,7 @@ describe("BaseModelProvider", () => { expect(provider.history[0]).toEqual({ options: { modelId: "dummy-request-model-id", - prompt: "the ny mets are:", + $prompt: "the ny mets are:", }, response: undefined, meta: { diff --git a/packages/core/src/providers/cohere/createCohereModelProvider.ts b/packages/core/src/providers/cohere/createCohereModelProvider.ts index 2e745a6..61d96c9 100644 --- a/packages/core/src/providers/cohere/createCohereModelProvider.ts +++ b/packages/core/src/providers/cohere/createCohereModelProvider.ts @@ -27,7 +27,7 @@ type CohereApi = CohereGenerateApi | CohereChatApi; * }); * * const 
response = await commandR.sendRequest({ - * prompt: "Brief History of NY Mets:", + * $prompt: "Brief History of NY Mets:", * preamble: "Talk like Jafar from Aladdin", * // all other Cohere /generate options available here * }); @@ -82,7 +82,7 @@ type CohereApi = CohereGenerateApi | CohereChatApi; * }); * * const response = await commandR.sendRequest({ - * prompt: "Brief History of NY Mets:", + * $prompt: "Brief History of NY Mets:", * preamble: "Talk like Jafar from Aladdin", * // all other Cohere /generate options available here * }); @@ -100,7 +100,7 @@ type CohereApi = CohereGenerateApi | CohereChatApi; * }); * * const response = await command.sendRequest({ - * prompt: "Brief History of NY Mets:", + * $prompt: "Brief History of NY Mets:", * }); * ``` */ diff --git a/packages/core/src/providers/groq/createGroqModelProvider.ts b/packages/core/src/providers/groq/createGroqModelProvider.ts index 916d51b..b57865b 100644 --- a/packages/core/src/providers/groq/createGroqModelProvider.ts +++ b/packages/core/src/providers/groq/createGroqModelProvider.ts @@ -23,7 +23,7 @@ import type { GroqAuthConfig } from "./authConfig"; * }); * * const response = await llama3.sendRequest({ - * prompt: "Brief History of NY Mets:" + * $prompt: "Brief History of NY Mets:" * // all other OpenAI ChatCompletion options available here (Groq uses the OpenAI ChatCompletion API for all the models it hosts) * }); * @@ -68,7 +68,7 @@ import type { GroqAuthConfig } from "./authConfig"; * modelId: "llama3-70b-8192", * }); * - * const response = await llama3.sendRequest({ prompt: "Brief History of NY Mets:" }); + * const response = await llama3.sendRequest({ $prompt: "Brief History of NY Mets:" }); * * console.log(response.choices[0]?.message.content); * ``` diff --git a/packages/core/src/providers/http/httpModelProvider.spec.ts b/packages/core/src/providers/http/httpModelProvider.spec.ts index ad747a0..fddac92 100644 --- a/packages/core/src/providers/http/httpModelProvider.spec.ts +++ b/packages/core/src/providers/http/httpModelProvider.spec.ts @@ -48,7 +48,7 @@ describe("HttpModelProvider", () => { mockApi.requestTemplate.render.mockReturnValue(mockBody); // act - await provider.sendRequest({ prompt: "Hello, world!" }); + await provider.sendRequest({ $prompt: "Hello, world!" }); // assert expect(mockClient.fetch).toHaveBeenCalledWith(mockEndpoint, { @@ -75,7 +75,7 @@ describe("HttpModelProvider", () => { mockApi.requestTemplate.render.mockReturnValue(mockBody); // act - await provider.sendRequest({ prompt: "Hello, world!" }); + await provider.sendRequest({ $prompt: "Hello, world!" }); // assert expect(mockClient.fetch).toHaveBeenCalledWith(mockEndpoint, { @@ -106,7 +106,7 @@ describe("HttpModelProvider", () => { mockApi.requestTemplate.render.mockReturnValue(mockBody); // act - await provider.sendRequest({ prompt: "Hello, world!" }); + await provider.sendRequest({ $prompt: "Hello, world!" }); // assert expect(mockClient.fetch).toHaveBeenCalledWith(mockEndpoint, { @@ -135,7 +135,7 @@ describe("HttpModelProvider", () => { // act & assert await expect( - provider.sendRequest({ prompt: "Hello, world!" }), + provider.sendRequest({ $prompt: "Hello, world!" 
diff --git a/packages/core/src/providers/huggingface_inference/createHuggingfaceInferenceModelProvider.ts b/packages/core/src/providers/huggingface_inference/createHuggingfaceInferenceModelProvider.ts
index 325c378..ae48d31 100644
--- a/packages/core/src/providers/huggingface_inference/createHuggingfaceInferenceModelProvider.ts
+++ b/packages/core/src/providers/huggingface_inference/createHuggingfaceInferenceModelProvider.ts
@@ -38,7 +38,7 @@ type HfApi = HfConversationalTaskApi | HfTextGenerationTaskApi;
 * });
 *
 * const response = await gpt2.sendRequest({
- *   prompt: "Hello,"
+ *   $prompt: "Hello,"
 *   // all other options for the specified `api` available here
 * });
 *
@@ -98,7 +98,7 @@ type HfApi = HfConversationalTaskApi | HfTextGenerationTaskApi;
 * });
 *
 * const response = await gpt2.sendRequest({
- *   prompt: "Hello,"
+ *   $prompt: "Hello,"
 *   // all other options for the specified `api` available here
 * });
 *
diff --git a/packages/core/src/providers/lm_studio/createLmStudioModelProvider.ts b/packages/core/src/providers/lm_studio/createLmStudioModelProvider.ts
index 4b26727..1f4624a 100644
--- a/packages/core/src/providers/lm_studio/createLmStudioModelProvider.ts
+++ b/packages/core/src/providers/lm_studio/createLmStudioModelProvider.ts
@@ -16,7 +16,7 @@ import { HttpModelProvider, InferHttpClientOptions } from "../http";
 * });
 *
 * const response = await llama3.sendRequest({
- *   prompt: "Brief History of NY Mets:"
+ *   $prompt: "Brief History of NY Mets:"
 *   // all other OpenAI ChatCompletion options available here (LMStudio uses the OpenAI ChatCompletion API for all the models it hosts)
 * });
 *
@@ -58,7 +58,7 @@ import { HttpModelProvider, InferHttpClientOptions } from "../http";
 * });
 *
 * const response = await llama3.sendRequest({
- *   prompt: "Brief History of NY Mets:"
+ *   $prompt: "Brief History of NY Mets:"
 *   // all other OpenAI ChatCompletion options available here (LMStudio uses the OpenAI ChatCompletion API for all the models it hosts)
 * });
 *
diff --git a/packages/core/src/providers/mistral/createMistralModelProvider.ts b/packages/core/src/providers/mistral/createMistralModelProvider.ts
index b95d127..257e7ab 100644
--- a/packages/core/src/providers/mistral/createMistralModelProvider.ts
+++ b/packages/core/src/providers/mistral/createMistralModelProvider.ts
@@ -20,7 +20,7 @@ import type { MistralAuthConfig } from "./authConfig";
 * });
 *
 * const response = await mistralLarge.sendRequest({
- *   prompt: "Brief History of NY Mets:"
+ *   $prompt: "Brief History of NY Mets:"
 *   // all other Mistral ChatCompletion API options available here
 * });
 *
@@ -65,7 +65,7 @@ import type { MistralAuthConfig } from "./authConfig";
 * });
 *
 * const response = await mistralLarge.sendRequest({
- *   prompt: "Brief History of NY Mets:"
+ *   $prompt: "Brief History of NY Mets:"
 *   // all other Mistral ChatCompletion API options available here
 * });
 *
diff --git a/packages/core/src/providers/openai/createOpenAiChatModelProvider.ts b/packages/core/src/providers/openai/createOpenAiChatModelProvider.ts
index c908a43..bfdf8a2 100644
--- a/packages/core/src/providers/openai/createOpenAiChatModelProvider.ts
+++ b/packages/core/src/providers/openai/createOpenAiChatModelProvider.ts
@@ -23,7 +23,7 @@ import type { OpenAiAuthConfig } from "./authConfig";
 * });
 *
 * const response = await gpt.sendRequest({
- *   prompt: "Brief History of NY Mets:",
+ *   $prompt: "Brief History of NY Mets:",
 *   max_tokens: 100,
 *   // all other OpenAI ChatCompletion options available here
 * });
@@ -69,7 +69,7 @@ import type { OpenAiAuthConfig } from "./authConfig";
 * });
 *
 * const response = await gpt.sendRequest({
- *   prompt: "Brief History of NY Mets:",
+ *   $prompt: "Brief History of NY Mets:",
 *   max_tokens: 100,
 *   // all other OpenAI ChatCompletion options available here
 * });
diff --git a/packages/core/src/typeDefs.ts b/packages/core/src/typeDefs.ts
index b03c971..6fcfcdc 100644
--- a/packages/core/src/typeDefs.ts
+++ b/packages/core/src/typeDefs.ts
@@ -15,7 +15,7 @@ export type ModelId = string;
  */
 export interface ModelRequestOptions {
   modelId: ModelId;
-  prompt: string;
+  $prompt: string;
 }

 /**
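Note: the `typeDefs.ts` hunk is the root of the whole rename. `ModelRequestOptions` is the base request-options shape, so changing its `prompt` field to `$prompt` is what forces every JSDoc example, README snippet, and test in this patch to follow suit. A minimal before/after sketch at a call site (the factory and `max_tokens` option appear elsewhere in this diff; auth configuration is omitted):

```ts
import { createOpenAiChatModelProvider } from "@packages/core";

const gpt = createOpenAiChatModelProvider({ modelId: "gpt-4-turbo" });

// Before this patch: { prompt: "..." }
// After this patch:  { $prompt: "..." }
const response = await gpt.sendRequest({
  $prompt: "Brief History of NY Mets:",
  max_tokens: 100,
});
```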
diff --git a/packages/gcloud-vertex-ai/src/createVertexAiModelProvider.ts b/packages/gcloud-vertex-ai/src/createVertexAiModelProvider.ts
index a94e7d6..f9d1f82 100644
--- a/packages/gcloud-vertex-ai/src/createVertexAiModelProvider.ts
+++ b/packages/gcloud-vertex-ai/src/createVertexAiModelProvider.ts
@@ -24,7 +24,7 @@ export type InferHttpClientOptions =
 * });
 *
 * const response = await gemini.sendRequest({
- *   prompt: "Brief History of NY Mets:",
+ *   $prompt: "Brief History of NY Mets:",
 *   // all other Gemini options available here
 * });
 *
@@ -77,7 +77,7 @@ export type InferHttpClientOptions =
 * });
 *
 * const response = await gemini.sendRequest({
- *   prompt: "Brief History of NY Mets:",
+ *   $prompt: "Brief History of NY Mets:",
 *   // all other Gemini options available here
 * });
 *
diff --git a/packages/generative-ts/README.md b/packages/generative-ts/README.md
index 1648873..91c2ebf 100644
--- a/packages/generative-ts/README.md
+++ b/packages/generative-ts/README.md
@@ -45,7 +45,7 @@ const titanText = createAwsBedrockModelProvider({
 });

 const response = await titanText.sendRequest({
-  prompt: "Brief history of NY Mets:"
+  $prompt: "Brief history of NY Mets:"
   // all other options for the specified `api` available here
 });

@@ -66,7 +66,7 @@ const commandR = createCohereModelProvider({
 });

 const response = await commandR.sendRequest({
-  prompt: "Brief History of NY Mets:",
+  $prompt: "Brief History of NY Mets:",
   preamble: "Talk like Jafar from Aladdin",
   // all other Cohere /generate options available here
 });

@@ -88,7 +88,7 @@ const gemini = await createVertexAiModelProvider({
 });

 const response = await gemini.sendRequest({
-  prompt: "Brief History of NY Mets:",
+  $prompt: "Brief History of NY Mets:",
   // all other Gemini options available here
 });

@@ -109,7 +109,7 @@ const llama3 = createGroqModelProvider({
 });

 const response = await llama3.sendRequest({
-  prompt: "Brief History of NY Mets:"
+  $prompt: "Brief History of NY Mets:"
   // all other OpenAI ChatCompletion options available here (Groq uses the OpenAI ChatCompletion API for all the models it hosts)
 });

@@ -135,7 +135,7 @@ const gpt2 = createHuggingfaceInferenceModelProvider({
 });

 const response = await gpt2.sendRequest({
-  prompt: "Hello,"
+  $prompt: "Hello,"
   // all other options for the specified `api` available here
 });

@@ -155,7 +155,7 @@ const llama3 = createLmStudioModelProvider({
 });

 const response = await llama3.sendRequest({
-  prompt: "Brief History of NY Mets:"
+  $prompt: "Brief History of NY Mets:"
   // all other OpenAI ChatCompletion options available here (LMStudio uses the OpenAI ChatCompletion API for all the models it hosts)
 });

@@ -176,7 +176,7 @@ const mistralLarge = createMistralModelProvider({
 });

 const response = await mistralLarge.sendRequest({
-  prompt: "Brief History of NY Mets:"
+  $prompt: "Brief History of NY Mets:"
   // all other Mistral ChatCompletion API options available here
 });

@@ -197,7 +197,7 @@ const gpt = createOpenAiChatModelProvider({
 });

 const response = await gpt.sendRequest({
-  prompt: "Brief History of NY Mets:",
+  $prompt: "Brief History of NY Mets:",
   max_tokens: 100,
   // all other OpenAI ChatCompletion options available here
 });
diff --git a/tests/e2e/cjs-bundle-test.js b/tests/e2e/cjs-bundle-test.js
index d2375b0..2719c16 100644
--- a/tests/e2e/cjs-bundle-test.js
+++ b/tests/e2e/cjs-bundle-test.js
@@ -10,7 +10,7 @@ async function send_message() {
   });

   const response = await model.sendRequest({
-    prompt: prompt,
+    $prompt: prompt,
     generation_config: {
       max_output_tokens: 50,
       temperature: 0
diff --git a/tests/e2e/core/cjs.test.js b/tests/e2e/core/cjs.test.js
index c9dd4b9..306a6a4 100644
--- a/tests/e2e/core/cjs.test.js
+++ b/tests/e2e/core/cjs.test.js
@@ -11,7 +11,7 @@ describe('@generative-ts/core cjs bundle', () => {
   });

   const response = await model.sendRequest({
-    prompt: "Say hi:",
+    $prompt: "Say hi:",
     max_tokens: 50,
     temperature: 0
   });
diff --git a/tests/e2e/core/esm.test.mjs b/tests/e2e/core/esm.test.mjs
index d021665..d6d1a7f 100644
--- a/tests/e2e/core/esm.test.mjs
+++ b/tests/e2e/core/esm.test.mjs
@@ -11,7 +11,7 @@ describe('@generative-ts/core esm bundle', () => {
   });

   const response = await model.sendRequest({
-    prompt: "Say hi:",
+    $prompt: "Say hi:",
     max_tokens: 50,
     temperature: 0
   });
diff --git a/tests/e2e/esm-bundle-test.js b/tests/e2e/esm-bundle-test.js
index 7268e74..4f653ce 100644
--- a/tests/e2e/esm-bundle-test.js
+++ b/tests/e2e/esm-bundle-test.js
@@ -10,7 +10,7 @@ async function send_message() {
   });

   const response = await model.sendRequest({
-    prompt: prompt,
+    $prompt: prompt,
     generation_config: {
       max_output_tokens: 50,
       temperature: 0
diff --git a/tests/e2e/gcloud-vertex-ai/cjs.test.js b/tests/e2e/gcloud-vertex-ai/cjs.test.js
index b934ae9..ad4dd59 100644
--- a/tests/e2e/gcloud-vertex-ai/cjs.test.js
+++ b/tests/e2e/gcloud-vertex-ai/cjs.test.js
@@ -15,7 +15,7 @@ describe('@generative-ts/gcloud-vertex-ai cjs bundle', () => {
   });

   const response = await model.sendRequest({
-    prompt: "Say hi:",
+    $prompt: "Say hi:",
     max_tokens: 50,
     temperature: 0
   });
diff --git a/tests/e2e/gcloud-vertex-ai/esm.test.mjs b/tests/e2e/gcloud-vertex-ai/esm.test.mjs
index 9dadc5c..9d84dcb 100644
--- a/tests/e2e/gcloud-vertex-ai/esm.test.mjs
+++ b/tests/e2e/gcloud-vertex-ai/esm.test.mjs
@@ -15,7 +15,7 @@ describe('@generative-ts/gcloud-vertex-ai esm bundle', () => {
   });

   const response = await model.sendRequest({
-    prompt: "Say hi:",
+    $prompt: "Say hi:",
     max_tokens: 50,
     temperature: 0
   });
diff --git a/tests/e2e/generative-ts/cjs.test.js b/tests/e2e/generative-ts/cjs.test.js
index e7dd9d2..7ad24c6 100644
--- a/tests/e2e/generative-ts/cjs.test.js
+++ b/tests/e2e/generative-ts/cjs.test.js
@@ -11,7 +11,7 @@ describe('generative-ts cjs bundle', () => {
   });

   const response = await model.sendRequest({
-    prompt: "Say hi:",
+    $prompt: "Say hi:",
     generation_config: {
       max_output_tokens: 50,
       temperature: 0
diff --git a/tests/e2e/generative-ts/esm.test.mjs b/tests/e2e/generative-ts/esm.test.mjs
index 9f3b737..b3ba4f5 100644
--- a/tests/e2e/generative-ts/esm.test.mjs
+++ b/tests/e2e/generative-ts/esm.test.mjs
@@ -11,7 +11,7 @@ describe('generative-ts esm bundle', () => {
   });

   const response = await model.sendRequest({
-    prompt: "Say hi:",
+    $prompt: "Say hi:",
     generation_config: {
       max_output_tokens: 50,
       temperature: 0
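Note: the e2e bundle tests above all issue the same minimal request, and the Vertex AI ones pair the renamed `$prompt` with Gemini's `generation_config`. A hedged sketch of that request shape after the rename (the model id is a hypothetical placeholder, and auth is assumed to come from the environment, as in the tests):

```ts
import { createVertexAiModelProvider } from "@generative-ts/gcloud-vertex-ai";

// Hypothetical model id for illustration; credentials assumed to come
// from the environment, as in the e2e tests.
const model = await createVertexAiModelProvider({
  modelId: "gemini-1.0-pro",
});

const response = await model.sendRequest({
  $prompt: "Say hi:",
  generation_config: {
    max_output_tokens: 50,
    temperature: 0,
  },
});
```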
diff --git a/tests/e2e/umd-bundle-test.html b/tests/e2e/umd-bundle-test.html
index f33299c..c668eb8 100644
--- a/tests/e2e/umd-bundle-test.html
+++ b/tests/e2e/umd-bundle-test.html
@@ -27,7 +27,7 @@

     UMD Bundle Test

   });

   const response = await model.sendRequest({
-    prompt: prompt,
+    $prompt: prompt,
     generation_config: {
       max_output_tokens: 50,
       temperature: 0
diff --git a/tests/integration/bedrock.test.ts b/tests/integration/bedrock.test.ts
index e431d60..3b47a37 100644
--- a/tests/integration/bedrock.test.ts
+++ b/tests/integration/bedrock.test.ts
@@ -17,7 +17,7 @@ test("Bedrock - AI21 J2 Complete", async () => {

   // act
   const response = await j2.sendRequest({
-    prompt: "Brief history of NY Mets:",
+    $prompt: "Brief history of NY Mets:",
     numResults: 1,
     maxTokens: 50,
     minTokens: 0,
@@ -40,7 +40,7 @@ test("Bedrock - Cohere Generate", async () => {

   // act
   const response = await cohereGenerate.sendRequest({
-    prompt: "Brief history of NY Mets:",
+    $prompt: "Brief history of NY Mets:",
     max_tokens: 100,
   });

@@ -72,7 +72,7 @@ test("Bedrock - Llama2 Chat", async () => {
         '{ "answer": "The LA Dodgers won the World Series in 2020." }',
       },
     ],
-    prompt: "Brief History of NY Mets:",
+    $prompt: "Brief History of NY Mets:",
   };

   // act
@@ -106,7 +106,7 @@ test("Bedrock - Llama3 Chat", async () => {
         '{ "answer": "The LA Dodgers won the World Series in 2020." }',
       },
     ],
-    prompt: "Brief History of NY Mets:",
+    $prompt: "Brief History of NY Mets:",
   };

   // act
@@ -140,7 +140,7 @@ test("Bedrock - Mistral (Bedrock specific)", async () => {
         '{ "answer": "The LA Dodgers won the World Series in 2020." }',
       },
     ],
-    prompt: "Brief History of NY Mets:",
+    $prompt: "Brief History of NY Mets:",
   };

   // act
@@ -159,7 +159,7 @@ test("Bedrock - Amazon TitanText", async () => {

   // act
   const response = await titanText.sendRequest({
-    prompt: "Brief history of NY Mets:",
+    $prompt: "Brief history of NY Mets:",
     maxTokenCount: 100,
   });

diff --git a/tests/integration/cohere-tools.test.ts b/tests/integration/cohere-tools.test.ts
index 1760319..d4fce11 100644
--- a/tests/integration/cohere-tools.test.ts
+++ b/tests/integration/cohere-tools.test.ts
@@ -9,7 +9,7 @@ test("Cohere - Chat (Tools)", async () => {
   // act
   // const response = await cohereChat.sendRequest({
-  //   prompt: "Will the NY Mets game be a rainout tonight",
+  //   $prompt: "Will the NY Mets game be a rainout tonight",
   //   preamble: "Answer like Jafar from Aladdin",
   //   tools: [
   //     {
@@ -47,7 +47,7 @@ test("Cohere - Chat (Tools)", async () => {
   */

   const response = await cohereChat.sendRequest({
-    prompt: "Will the NY Mets game be a rainout tonight",
+    $prompt: "Will the NY Mets game be a rainout tonight",
     preamble: "Answer like Jafar from Aladdin",
     system: "Use the correct JSON output format",
     examplePairs: [
diff --git a/tests/integration/cohere.test.ts b/tests/integration/cohere.test.ts
index 2b9e2b6..95152e8 100644
--- a/tests/integration/cohere.test.ts
+++ b/tests/integration/cohere.test.ts
@@ -13,7 +13,7 @@ test("Cohere - Generate", async () => {

   // act
   const response = await cohereGen.sendRequest({
-    prompt: "Brief History of NY Mets:",
+    $prompt: "Brief History of NY Mets:",
     return_likelihoods: "ALL",
   });

@@ -30,7 +30,7 @@ test("Cohere - Chat", async () => {

   // act
   const response = await cohereChat.sendRequest({
-    prompt: "Brief History of NY Mets:",
+    $prompt: "Brief History of NY Mets:",
     preamble: "Talk like Jafar from Aladdin",
   });

@@ -46,7 +46,7 @@ test("Cohere - Chat (Default)", async () => {

   // act
   const response = await cohereChat.sendRequest({
-    prompt: "Brief History of NY Mets:",
+    $prompt: "Brief History of NY Mets:",
     preamble: "Talk like Jafar from Aladdin",
   });

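Note: the three Bedrock chat hunks (Llama2, Llama3, and Mistral) rename the same few-shot payload. Consolidated, the shape they exercise looks like the sketch below; the provider construction is elided here, as it is in the hunks, and the `user`/`assistant` field names follow the few-shot payload used elsewhere in the repo.

```ts
// Few-shot chat payload shared by the Bedrock Llama2/Llama3/Mistral tests:
// examplePairs seeds the conversation, $prompt is the live user turn.
const payload = {
  examplePairs: [
    {
      user: "When did the New York Mets win the World Series?",
      assistant:
        '{ "answer": "The LA Dodgers won the World Series in 2020." }',
    },
  ],
  $prompt: "Brief History of NY Mets:",
};

// `llama3` stands for a Bedrock Llama3 provider, construction elided.
const response = await llama3.sendRequest(payload);
```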
diff --git a/tests/integration/groq.test.ts b/tests/integration/groq.test.ts
index ca3699c..a3194d4 100644
--- a/tests/integration/groq.test.ts
+++ b/tests/integration/groq.test.ts
@@ -9,7 +9,7 @@ test("Groq - OpenAI ChatCompletion", async () => {
   // act
   const response = await groq.sendRequest(
     {
-      prompt: "Brief History of NY Mets:",
+      $prompt: "Brief History of NY Mets:",
       max_tokens: 100,
     },
     {
diff --git a/tests/integration/hf.test.ts b/tests/integration/hf.test.ts
index 2206f99..c39b751 100644
--- a/tests/integration/hf.test.ts
+++ b/tests/integration/hf.test.ts
@@ -13,7 +13,7 @@ test("Huggingface - Conversational Task", async () => {

   // act
   const response = await dialoGpt.sendRequest({
-    prompt: "What is the capital of France? Please respond in JSON format.",
+    $prompt: "What is the capital of France? Please respond in JSON format.",
     past_user_inputs: [
       "Whats the capital of Mexico? Please respond in JSON format.",
     ],
@@ -35,7 +35,7 @@ test("Huggingface - TextGeneration Task", async () => {

   // act
   const response = await gpt2.sendRequest({
-    prompt: "Brief History of NY Mets:",
+    $prompt: "Brief History of NY Mets:",
   });

   // assert
diff --git a/tests/integration/lmstudio.test.ts b/tests/integration/lmstudio.test.ts
index 2251dd7..f8ea432 100644
--- a/tests/integration/lmstudio.test.ts
+++ b/tests/integration/lmstudio.test.ts
@@ -8,7 +8,7 @@ test("LmStudio - OpenAI ChatCompletion", async () => {

   // act
   const response = await lmStudio.sendRequest({
-    prompt: "Brief History of NY Mets:",
+    $prompt: "Brief History of NY Mets:",
     max_tokens: 100,
   });

diff --git a/tests/integration/mistral.test.ts b/tests/integration/mistral.test.ts
index 3a0ac24..8568988 100644
--- a/tests/integration/mistral.test.ts
+++ b/tests/integration/mistral.test.ts
@@ -8,7 +8,7 @@ test("Mistral - Mistral AI ChatCompletion", async () => {

   // act
   const response = await mistralLarge.sendRequest({
-    prompt: "Brief History of NY Mets:",
+    $prompt: "Brief History of NY Mets:",
     max_tokens: 100,
   });

diff --git a/tests/integration/openai.test.ts b/tests/integration/openai.test.ts
index 5368a4e..a7d425c 100644
--- a/tests/integration/openai.test.ts
+++ b/tests/integration/openai.test.ts
@@ -8,7 +8,7 @@ test("OpenAI - OpenAI ChatCompletion", async () => {

   // act
   const response = await gpt4.sendRequest({
-    prompt: "Brief History of NY Mets:",
+    $prompt: "Brief History of NY Mets:",
     max_tokens: 100,
   });

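Note: the Huggingface conversational hunk is the one place in these tests where `$prompt` rides alongside task-specific chat history (`past_user_inputs`). A hedged sketch of that call after the rename; the provider setup is elided because the hunk doesn't show it, and the `generated_responses` field and its value are hypothetical additions based on the HF Conversational task API, not on this patch.

```ts
// `dialoGpt` stands for a Huggingface Inference provider configured with
// the conversational task api; exact factory arguments are elided here,
// since the hunk above doesn't show them.
const response = await dialoGpt.sendRequest({
  $prompt: "What is the capital of France? Please respond in JSON format.",
  // past_user_inputs comes straight from the hunk above;
  // generated_responses (and its value) are a hypothetical counterpart
  // from the HF Conversational task API.
  past_user_inputs: [
    "Whats the capital of Mexico? Please respond in JSON format.",
  ],
  generated_responses: ['{ "answer": "Mexico City" }'],
});
```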
diff --git a/tests/integration/vertexai-tools.test.ts b/tests/integration/vertexai-tools.test.ts
index 09cce92..0af51bd 100644
--- a/tests/integration/vertexai-tools.test.ts
+++ b/tests/integration/vertexai-tools.test.ts
@@ -13,7 +13,7 @@ xtest("VertexAI - Google Gemini (Tools)", async () => {
   // act
   const response = await model.sendRequest({
     system: "Use tools to help answer questions.",
-    prompt: "What is the weather in Boston and New York City?",
+    $prompt: "What is the weather in Boston and New York City?",
     tools: [
       {
         function_declarations: [
@@ -55,7 +55,7 @@ xtest("VertexAI - Google Gemini (Tools with Responses)", async () => {
   // act
   const response = await model.sendRequest({
     system: "Use tools to help answer questions.",
-    prompt: "",
+    $prompt: "",
     examplePairs: [
       {
         user: "When did the New York Mets win the World Series?",
@@ -205,9 +205,14 @@ test("VertexAI - Google Gemini ($tools)", async () => {
   ];

   // act
+  const system =
+    "Use tools to help answer questions. Keep in mind that you can make multiple tool calls.";
+
+  const $prompt = "What is the weather in Boston and New York City?";
+
   const response = await model.sendRequest({
-    system: "Use tools to help answer questions.",
-    prompt: "What is the weather in Boston and New York City?",
+    system,
+    $prompt,
     $tools: tools.map(({ descriptor }) => descriptor),
   });

@@ -224,17 +229,16 @@ test("VertexAI - Google Gemini ($tools)", async () => {
     console.log("No unresolved!");
   }

+  // TODO const contents = getConversationHistory(modelProvider); ???
   const last = response.data.candidates[0]?.content;

   if (!last) {
     throw new Error("No content found in response!?");
   }

-  const response2 = await model.sendRequest({
-    system:
-      "Use tools to help answer questions. Keep in mind that you can make multiple tool calls.",
-    prompt: "What is the weather in Boston and New York City?",
+  const response2 = await model.sendRequest({
+    system,
+    $prompt,
     contents: [
       {
         // ...last,
diff --git a/tests/integration/vertexai.test.ts b/tests/integration/vertexai.test.ts
index 971e522..98fdf61 100644
--- a/tests/integration/vertexai.test.ts
+++ b/tests/integration/vertexai.test.ts
@@ -9,7 +9,7 @@ test("VertexAI - Google Gemini", async () => {
   // act
   const response = await model.sendRequest({
     system: "Talk like Jafar from Aladdin",
-    prompt: "Brief History of NY Mets:",
+    $prompt: "Brief History of NY Mets:",
     examplePairs: [
       {
         user: "When did the New York Mets win the World Series?",
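Note: read together, the `$tools` hunks describe a two-step flow: the first request sends `$prompt` with bare tool descriptors, unresolved tool calls are reported, and a follow-up request threads the model's last turn back in via `contents`. A hedged consolidation of that loop follows; everything in it is lifted from the hunks, and the setup of `model` and `tools`, plus the exact shape threaded into `contents`, are elided exactly as they are in the test.

```ts
const system =
  "Use tools to help answer questions. Keep in mind that you can make multiple tool calls.";
const $prompt = "What is the weather in Boston and New York City?";

// Step 1: offer the tool descriptors alongside the prompt.
const response = await model.sendRequest({
  system,
  $prompt,
  $tools: tools.map(({ descriptor }) => descriptor),
});

// Step 2: carry the model's last turn forward and ask again.
const last = response.data.candidates[0]?.content;
if (!last) {
  throw new Error("No content found in response!?");
}

const response2 = await model.sendRequest({
  system,
  $prompt,
  contents: [
    {
      // ...last,  <- the previous turn would be threaded through here,
      //             mirroring the test's own placeholder
    },
  ],
});
```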