forked from pedrojlazevedo/twitch-chatgpt
-
Notifications
You must be signed in to change notification settings - Fork 0
/
index.js
133 lines (106 loc) · 4.19 KB
/
index.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
// Twitch ChatGPT bot backend: an Express server that relays chat messages
// to the OpenAI API and returns the agent's reply.
const express = require('express')
const request = require('request')
const app = express()
const fs = require('fs');
const { promisify } = require('util')
const readFile = promisify(fs.readFile)

// GPT_MODE selects "CHAT" (chat completions with rolling history) or
// plain prompt/completion mode (any other value).
const GPT_MODE = process.env.GPT_MODE

// Fallback context for prompt mode; replaced by ./file_context.txt when present.
let file_context = "You are a helpful Twitch Chatbot."

// Conversation history for CHAT mode; index 0 is always the system message.
const messages = [
    {role: "system", content: "You are a helpful Twitch Chatbot."}
];

console.log("GPT_MODE is " + GPT_MODE)
console.log("History length is " + process.env.HISTORY_LENGTH)
// SECURITY FIX: never print the raw API key to the logs — only whether it is set.
console.log("OpenAI API Key is " + (process.env.OPENAI_API_KEY ? "set" : "NOT set"))

// FIX: `extended` is an option of express.urlencoded, not express.json;
// it was silently ignored here, so it has been removed.
app.use(express.json({limit: '1mb'}))
// Health-check endpoint: answers any HTTP method on the root path.
app.all('/', (incoming, reply) => {
    console.log("Just got a request!")
    reply.send('Yo!')
})
// Load ./file_context.txt once at startup. In CHAT mode it replaces the
// system message; in prompt mode it is prepended to every user prompt.
// FIX: the file was previously read by two duplicated fs.readFile calls
// (one per branch) and a read error `throw`n inside the callback crashed
// the whole process. Read once via the promisified readFile set up above,
// and fall back to the built-in defaults if the file is missing.
readFile("./file_context.txt", 'utf8')
    .then((data) => {
        if (process.env.GPT_MODE === "CHAT") {
            console.log("Reading context file and adding it as system level message for the agent.")
            messages[0].content = data;
        } else {
            console.log("Reading context file and adding it in front of user prompts:")
            file_context = data;
            console.log(file_context);
        }
    })
    .catch((err) => {
        // Robustness: keep running with the default context instead of crashing.
        console.error("Could not read ./file_context.txt, using default context:", err.message)
    });
// Main chat endpoint. The wildcard path carries the user text, e.g.
// GET /gpt/Username:Hello — the agent should receive "Username:Message"
// so it can tell conversations with different users apart in its history.
app.get('/gpt/*', async (req, res) => {
    const text = req.params[0];
    const { Configuration, OpenAIApi } = require("openai");
    const configuration = new Configuration({
        apiKey: process.env.OPENAI_API_KEY,
    });
    const openai = new OpenAIApi(configuration);
    // FIX: the awaited OpenAI calls were not wrapped in try/catch, so any
    // API/network failure rejected the handler promise, the client never
    // got a response, and Node logged an unhandled rejection.
    try {
        if (GPT_MODE === "CHAT"){
            //CHAT MODE EXECUTION
            //Add user message to messages
            messages.push({role: "user", content: text})
            //Check if message history is exceeded
            console.log("Conversations in History: " + ((messages.length / 2) -1) + "/" + process.env.HISTORY_LENGTH)
            // FIX: parse HISTORY_LENGTH explicitly. As before, an unset/invalid
            // value disables trimming (NaN comparisons were always false) —
            // set HISTORY_LENGTH in production or the history grows unboundedly.
            const historyLength = Number(process.env.HISTORY_LENGTH)
            if (Number.isFinite(historyLength) && messages.length > ((historyLength * 2) + 1)) {
                console.log('Message amount in history exceeded. Removing oldest user and agent messages.')
                // Drop the oldest user/assistant pair; index 0 (system message) is kept.
                messages.splice(1,2)
            }
            console.log("Messages: ")
            console.dir(messages)
            console.log("User Input: " + text)
            const response = await openai.createChatCompletion({
                model: "gpt-3.5-turbo",
                messages: messages,
                temperature: 0.5,
                max_tokens: 128,
                top_p: 1,
                frequency_penalty: 0,
                presence_penalty: 0,
            });
            if (response.data.choices) {
                let agent_response = response.data.choices[0].message.content
                console.log ("Agent answer: " + agent_response)
                messages.push({role: "assistant", content: agent_response})
                //Check for Twitch max. chat message length limit and slice if needed
                if(agent_response.length > 399){
                    console.log("Agent answer exceeds twitch chat limit. Slicing to first 399 characters.")
                    agent_response = agent_response.substring(0, 399)
                    console.log ("Sliced agent answer: " + agent_response)
                }
                res.send(agent_response)
            } else {
                res.send("Something went wrong. Try again later!")
            }
        } else {
            //PROMPT MODE EXECUTION
            const prompt = file_context + "\n\nQ:" + text + "\nA:";
            console.log("User Input: " + text)
            const response = await openai.createCompletion({
                model: "text-davinci-003",
                prompt: prompt,
                temperature: 0.5,
                max_tokens: 128,
                top_p: 1,
                frequency_penalty: 0,
                presence_penalty: 0,
            });
            if (response.data.choices) {
                let agent_response = response.data.choices[0].text
                console.log ("Agent answer: " + agent_response)
                //Check for Twitch max. chat message length limit and slice if needed
                if(agent_response.length > 399){
                    console.log("Agent answer exceeds twitch chat limit. Slicing to first 399 characters.")
                    agent_response = agent_response.substring(0, 399)
                    console.log ("Sliced Agent answer: " + agent_response)
                }
                res.send(agent_response)
            } else {
                res.send("Something went wrong. Try again later!")
            }
        }
    } catch (err) {
        // Answer the client gracefully instead of leaving the request hanging.
        console.error("OpenAI request failed:", err.message)
        res.send("Something went wrong. Try again later!")
    }
})
app.listen(process.env.PORT || 3000)