
Commit

rename prompt -> $prompt
jnaglick committed Jun 21, 2024
1 parent d746bf7 commit 7e598f3
Showing 74 changed files with 833 additions and 1,904 deletions.
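
The commit renames the request option prompt to $prompt across the README, the examples, and the API templates and their tests. A minimal before/after sketch of the user-facing change (import path taken from examples/index.ts below; the model id, option values, and key handling are illustrative assumptions, not part of this commit):

import { createOpenAiChatModelProvider } from "@packages/core";

// Assumes an OpenAI API key is available to the provider (e.g. via environment).
const gpt = createOpenAiChatModelProvider({
  modelId: "gpt-4-turbo",
});

async function main() {
  // Before this commit:
  //   await gpt.sendRequest({ prompt: "Brief History of NY Mets:" });

  // After this commit, the same request names the option $prompt:
  const response = await gpt.sendRequest({
    $prompt: "Brief History of NY Mets:",
    max_tokens: 100,
  });
  console.log(response);
}

main();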
16 changes: 8 additions & 8 deletions README.md
@@ -45,7 +45,7 @@ const titanText = createAwsBedrockModelProvider({
});

const response = await titanText.sendRequest({
prompt: "Brief history of NY Mets:"
$prompt:"Brief history of NY Mets:"
// all other options for the specified `api` available here
});

@@ -66,7 +66,7 @@ const commandR = createCohereModelProvider({
});

const response = await commandR.sendRequest({
prompt: "Brief History of NY Mets:",
$prompt:"Brief History of NY Mets:",
preamble: "Talk like Jafar from Aladdin",
// all other Cohere /generate options available here
});
@@ -88,7 +88,7 @@ const gemini = await createVertexAiModelProvider({
});

const response = await gemini.sendRequest({
prompt: "Brief History of NY Mets:",
$prompt:"Brief History of NY Mets:",
// all other Gemini options available here
});

@@ -109,7 +109,7 @@ const llama3 = createGroqModelProvider({
});

const response = await llama3.sendRequest({
prompt: "Brief History of NY Mets:"
$prompt:"Brief History of NY Mets:"
// all other OpenAI ChatCompletion options available here (Groq uses the OpenAI ChatCompletion API for all the models it hosts)
});

@@ -135,7 +135,7 @@ const gpt2 = createHuggingfaceInferenceModelProvider({
});

const response = await gpt2.sendRequest({
prompt: "Hello,"
$prompt:"Hello,"
// all other options for the specified `api` available here
});

@@ -155,7 +155,7 @@ const llama3 = createLmStudioModelProvider({
});

const response = await llama3.sendRequest({
prompt: "Brief History of NY Mets:"
$prompt:"Brief History of NY Mets:"
// all other OpenAI ChatCompletion options available here (LMStudio uses the OpenAI ChatCompletion API for all the models it hosts)
});

@@ -176,7 +176,7 @@ const mistralLarge = createMistralModelProvider({
});

const response = await mistralLarge.sendRequest({
prompt: "Brief History of NY Mets:"
$prompt:"Brief History of NY Mets:"
// all other Mistral ChatCompletion API options available here
});

@@ -197,7 +197,7 @@ const gpt = createOpenAiChatModelProvider({
});

const response = await gpt.sendRequest({
prompt: "Brief History of NY Mets:",
$prompt:"Brief History of NY Mets:",
max_tokens: 100,
// all other OpenAI ChatCompletion options available here
});
2 changes: 1 addition & 1 deletion examples/few-shot-chat-models.ts
@@ -22,7 +22,7 @@ const payload = {
assistant: '{ "answer": "The LA Dodgers won the World Series in 2020." }',
},
],
prompt: "Brief History of NY Mets:",
$prompt: "Brief History of NY Mets:",
};

async function main() {
24 changes: 12 additions & 12 deletions examples/index.ts
@@ -15,7 +15,7 @@ import {
} from "@packages/core";

async function main() {
- const prompt = "Brief History of NY Mets:";
+ const $prompt = "Brief History of NY Mets:";

const gptProvider = createOpenAiChatModelProvider({
modelId: "gpt-4-turbo",
@@ -76,39 +76,39 @@ async function main() {
provider: gptProvider,
params: {
system: "talk like jafar from aladdin",
- prompt,
+ $prompt,
max_tokens: 50,
temperature: 1.0,
},
},
{
name: "Titan(AWS)",
provider: titanTextProvider,
- params: { prompt, maxTokenCount: 50, temperature: 1.0 },
+ params: { $prompt, maxTokenCount: 50, temperature: 1.0 },
},
{
name: "Cohere-Command(AWS)",
provider: cohereCommandProvider,
- params: { prompt, max_tokens: 50, temperature: 1.0 },
+ params: { $prompt, max_tokens: 50, temperature: 1.0 },
},
{
name: "DialoGPT(HF)",
provider: hfConvoProvider,
params: {
- prompt,
+ $prompt,
parameters: { max_new_tokens: 50, temperature: 1.0 },
},
},
{
name: "GPT2(HF)",
provider: hfTextgenProvider,
- params: { prompt, parameters: { max_new_tokens: 50, temperature: 1.0 } },
+ params: { $prompt, parameters: { max_new_tokens: 50, temperature: 1.0 } },
},
{
name: "LLama3(LM Studio)",
provider: lmStudioProvider,
params: {
- prompt,
+ $prompt,
system: "talk like iago from aladdin",
temperature: 1.0,
max_tokens: 50,
@@ -118,34 +118,34 @@
name: "Llama3(AWS)",
provider: llama3aws,
params: {
- prompt,
+ $prompt,
system: "talk like jafar from aladdin",
temperature: 1.0,
},
},
{
name: "Jurassic2(AWS)",
provider: jurassic,
- params: { prompt, maxTokens: 50, temperature: 1.0 },
+ params: { $prompt, maxTokens: 50, temperature: 1.0 },
},
{
name: "Mistral(AWS)",
provider: mistral,
- params: { prompt, temperature: 1.0 },
+ params: { $prompt, temperature: 1.0 },
},
{
name: "Lama3-70b(Groq)",
provider: groqProvider,
params: {
- prompt,
+ $prompt,
system: "talk like jafar from aladdin",
temperature: 1.0,
},
},
{
name: "Cohere-Command(Cohere-API)",
provider: cohereProvider,
- params: { prompt },
+ params: { $prompt },
},
];

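Note on the example diff above: the local const in examples/index.ts is renamed along with the option so that object-property shorthand still works — $ is a legal identifier character in TypeScript, so params: { $prompt } is shorthand for params: { $prompt: $prompt }.
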
12 changes: 6 additions & 6 deletions packages/core/src/apis/ai21/__snapshots__/jurassic2.spec.ts.snap
@@ -1,5 +1,11 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP

+ exports[`Ai21Jurassic2Api: $prompt 1`] = `
+ {
+   "prompt": "mock-prompt",
+ }
+ `;

exports[`Ai21Jurassic2Api: all options 1`] = `
{
"countPenalty": {
@@ -39,9 +45,3 @@ exports[`Ai21Jurassic2Api: all options 1`] = `
"topP": 0.9,
}
`;

- exports[`Ai21Jurassic2Api: prompt 1`] = `
- {
-   "prompt": "mock-prompt",
- }
- `;
6 changes: 3 additions & 3 deletions packages/core/src/apis/ai21/jurassic2.spec.ts
@@ -17,9 +17,9 @@ describe("Ai21Jurassic2Api:", () => {
* FewShotRequestOptions (prompt):
*/

test("prompt", () => {
test("$prompt", () => {
const rendered = render({
prompt: "mock-prompt",
$prompt: "mock-prompt",
});

expect(rendered).toMatchSnapshot();
@@ -31,7 +31,7 @@ describe("Ai21Jurassic2Api:", () => {

test("all options", () => {
const rendered = render({
prompt: "mock-prompt",
$prompt: "mock-prompt",
numResults: 3,
maxTokens: 200,
minTokens: 100,
4 changes: 2 additions & 2 deletions packages/core/src/apis/ai21/jurassic2.ts
@@ -40,7 +40,7 @@ export interface Ai21Jurassic2Options extends ModelRequestOptions {
*/
export const Ai21Jurassic2Template = new FnTemplate(
({
- prompt,
+ $prompt,
numResults,
maxTokens,
minTokens,
@@ -54,7 +54,7 @@ export const Ai21Jurassic2Template = new FnTemplate(
}: Ai21Jurassic2Options) => {
return JSON.stringify(
{
- prompt,
+ prompt: $prompt,
numResults,
maxTokens,
minTokens,
12 changes: 6 additions & 6 deletions packages/core/src/apis/amazon/__snapshots__/titanText.spec.ts.snap
@@ -1,5 +1,11 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP

+ exports[`AmazonTitanTextApi: $prompt 1`] = `
+ {
+   "inputText": "mock-prompt",
+ }
+ `;

exports[`AmazonTitanTextApi: all options 1`] = `
{
"inputText": "mock-prompt",
@@ -14,9 +20,3 @@ exports[`AmazonTitanTextApi: all options 1`] = `
},
}
`;

- exports[`AmazonTitanTextApi: prompt 1`] = `
- {
-   "inputText": "mock-prompt",
- }
- `;
6 changes: 3 additions & 3 deletions packages/core/src/apis/amazon/titanText.spec.ts
@@ -16,9 +16,9 @@ describe("AmazonTitanTextApi:", () => {
/**
* FewShotRequestOptions (prompt):
*/
test("prompt", () => {
test("$prompt", () => {
const rendered = render({
prompt: "mock-prompt",
$prompt: "mock-prompt",
});

expect(rendered).toMatchSnapshot();
@@ -29,7 +29,7 @@
*/
test("all options", () => {
const rendered = render({
prompt: "mock-prompt",
$prompt: "mock-prompt",
temperature: 0.7,
topP: 0.9,
maxTokenCount: 100,
4 changes: 2 additions & 2 deletions packages/core/src/apis/amazon/titanText.ts
@@ -23,14 +23,14 @@ export interface AmazonTitanTextOptions extends ModelRequestOptions {
*/
export const AmazonTitanTextTemplate = new FnTemplate(
({
- prompt,
+ $prompt,
temperature,
topP,
maxTokenCount,
stopSequences,
}: AmazonTitanTextOptions) => {
const rewritten = {
- inputText: prompt,
+ inputText: $prompt,
};

const textGenerationConfig = {
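Both template diffs follow the same pattern: the caller-facing option becomes $prompt while the rendered payload keeps each provider's native key (prompt for Jurassic2, inputText for Titan Text). A reduced sketch of that mapping (a plain function standing in for FnTemplate, with the option types trimmed to the relevant fields):

type Jurassic2Options = { $prompt: string; maxTokens?: number };

// Render the caller-facing $prompt into Jurassic2's native "prompt" key.
const renderJurassic2 = ({ $prompt, maxTokens }: Jurassic2Options): string =>
  JSON.stringify({ prompt: $prompt, maxTokens });

console.log(renderJurassic2({ $prompt: "mock-prompt", maxTokens: 200 }));
// -> {"prompt":"mock-prompt","maxTokens":200}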
(The remaining 65 changed files were not loaded.)
