route.ts (generated from langchain-ai/langchain-nextjs-template)
import { NextRequest, NextResponse } from "next/server";
import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { HttpResponseOutputParser } from "langchain/output_parsers";
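
// Run this handler on the Edge runtime (a Next.js route segment option),
// which is well suited to streaming responses.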
export const runtime = "edge";

const formatMessage = (message: VercelChatMessage) => {
  return `${message.role}: ${message.content}`;
};
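// e.g. formatMessage({ role: "user", content: "Ahoy" }) -> "user: Ahoy"
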
const TEMPLATE = `You are a pirate named Patchy. All responses must be extremely verbose and in pirate dialect.

Current conversation:
{chat_history}

User: {input}
AI:`;
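// The {chat_history} and {input} placeholders are bound when chain.stream()
// is called inside the handler below.
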
/**
 * This handler initializes and calls a simple chain with a prompt,
 * chat model, and output parser. See the docs for more information:
 *
 * https://js.langchain.com/docs/guides/expression_language/cookbook#prompttemplate--llm--outputparser
 */
export async function POST(req: NextRequest) {
  try {
    const body = await req.json();
    const messages = body.messages ?? [];
    const formattedPreviousMessages = messages.slice(0, -1).map(formatMessage);
    const currentMessageContent = messages[messages.length - 1].content;

    const prompt = PromptTemplate.fromTemplate(TEMPLATE);

    /**
     * You can also try e.g.:
     *
     * import { ChatAnthropic } from "@langchain/anthropic";
     * const model = new ChatAnthropic({});
     *
     * See a full list of supported models at:
     * https://js.langchain.com/docs/modules/model_io/models/
     */
    const model = new ChatOpenAI({
      temperature: 0.8,
      model: "gpt-3.5-turbo-0125",
    });

    /**
     * Chat models stream message chunks rather than bytes, so this
     * output parser handles serialization and byte-encoding.
     */
    const outputParser = new HttpResponseOutputParser();

    /**
     * Can also initialize as:
     *
     * import { RunnableSequence } from "@langchain/core/runnables";
     * const chain = RunnableSequence.from([prompt, model, outputParser]);
     */
    const chain = prompt.pipe(model).pipe(outputParser);

    const stream = await chain.stream({
      chat_history: formattedPreviousMessages.join("\n"),
      input: currentMessageContent,
    });

    return new StreamingTextResponse(stream);
  } catch (e: any) {
    return NextResponse.json({ error: e.message }, { status: e.status ?? 500 });
  }
}
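
For reference, a minimal sketch of a client component that consumes this route. It assumes the handler lives at app/api/chat/route.ts (so the endpoint is /api/chat) and uses the useChat hook from "ai/react", the React entry point of the same "ai" package the handler imports from; the api path is an assumption, so adjust it to wherever this file actually sits.

"use client";

import { useChat } from "ai/react";

export default function Chat() {
  // useChat POSTs { messages: [...] } to the endpoint and streams the text
  // reply back into `messages` as it arrives.
  const { messages, input, handleInputChange, handleSubmit } = useChat({
    api: "/api/chat", // assumed path; match the actual route location
  });

  return (
    <form onSubmit={handleSubmit}>
      {messages.map((m) => (
        <div key={m.id}>{`${m.role}: ${m.content}`}</div>
      ))}
      <input value={input} onChange={handleInputChange} />
    </form>
  );
}

The request body shape useChat sends ({ messages }) matches what POST reads via body.messages above, which is why no extra client-side serialization is needed.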