
feat: update the wording for the AI divergence modes and their corresponding temperature ranges #268

Merged · merged 2 commits · Nov 19, 2023
code/config.example.yaml (2 additions, 1 deletion)
@@ -9,7 +9,8 @@ BOT_NAME: chatGpt
# openAI key 支持负载均衡 可以填写多个key 用逗号分隔
OPENAI_KEY: sk-xxx,sk-xxx,sk-xxx
# openAI model 指定模型,默认为 gpt-3.5-turbo
# 可选参数有:"gpt-4-0314", "gpt-4", "gpt-3.5-turbo-0301","gpt-3.5-turbo-16k", "gpt-3.5-turbo",如果使用gpt-4,请确认自己是否有接口调用白名单
# 可选参数有:"gpt-4-1106-preview", "gpt-4-32K","gpt-4","gpt-3.5-turbo-16k", "gpt-3.5-turbo","gpt-3.5-turbo-1106" 等
# 如果使用gpt-4,请确认自己是否有接口调用白名单
OPENAI_MODEL: gpt-3.5-turbo
# openAI 最大token数 默认为2000
OPENAI_MAX_TOKENS: 2000
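The comment in this config hunk notes that OPENAI_KEY accepts several comma-separated keys for load balancing. Below is a minimal sketch of how such a value could be split and spread across requests; the keyPool helper is hypothetical and not the repository's actual implementation.

package main

import (
    "fmt"
    "math/rand"
    "strings"
)

// keyPool is a hypothetical helper showing how a comma-separated
// OPENAI_KEY value could be split and balanced across requests.
type keyPool struct {
    keys []string
}

func newKeyPool(raw string) *keyPool {
    var keys []string
    for _, k := range strings.Split(raw, ",") {
        if k = strings.TrimSpace(k); k != "" {
            keys = append(keys, k)
        }
    }
    return &keyPool{keys: keys}
}

// pick returns a random key so calls are distributed over all configured keys.
func (p *keyPool) pick() string {
    return p.keys[rand.Intn(len(p.keys))]
}

func main() {
    pool := newKeyPool("sk-aaa, sk-bbb, sk-ccc")
    fmt.Println("using key:", pool.pick())
}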
code/handlers/card_ai_mode_action.go (1 addition, 1 deletion)
@@ -31,7 +31,7 @@ func CommonProcessAIMode(msg CardMsg, cardAction *larkcard.CardAction,
cache services.SessionServiceCacheInterface) (interface{},
error, bool) {
option := cardAction.Action.Option
replyMsg(context.Background(), "已选择AI模式:"+option,
replyMsg(context.Background(), "已选择发散模式:"+option,
&msg.MsgId)
cache.SetAIMode(msg.SessionId, openai.AIModeMap[option])
return nil, nil, true
code/handlers/event_common_action.go (2 additions, 2 deletions)
@@ -155,12 +155,12 @@ func (*RoleListAction) Execute(a *ActionInfo) bool {
return true
}

type AIModeAction struct { /*AI模式*/
type AIModeAction struct { /*发散模式*/
}

func (*AIModeAction) Execute(a *ActionInfo) bool {
if _, foundMode := utils.EitherCutPrefix(a.info.qParsed,
"/ai_mode", "AI模式"); foundMode {
"/ai_mode", "发散模式"); foundMode {
SendAIModeListsCard(*a.ctx, a.info.sessionId, a.info.msgId, openai.AIModeStrs)
return false
}
code/handlers/event_msg_action.go (4 additions, 2 deletions)
@@ -37,10 +37,10 @@ func (*MessageAction) Execute(a *ActionInfo) bool {
Role: "user", Content: a.info.qParsed,
})

//fmt.Println("msg", msg)
//logger.Debug("msg", msg)
// get ai mode as temperature
aiMode := a.handler.sessionCache.GetAIMode(*a.info.sessionId)
fmt.Println("msg: ", msg)
fmt.Println("aiMode: ", aiMode)
completions, err := a.handler.gpt.Completions(msg, aiMode)
if err != nil {
replyMsg(*a.ctx, fmt.Sprintf(
@@ -132,6 +132,8 @@ func (m *StreamMessageAction) Execute(a *ActionInfo) bool {

//log.Printf("UserId: %s , Request: %s", a.info.userId, msg)
aiMode := a.handler.sessionCache.GetAIMode(*a.info.sessionId)
//fmt.Println("msg: ", msg)
//fmt.Println("aiMode: ", aiMode)
if err := a.handler.gpt.StreamChat(*a.ctx, msg, aiMode,
chatResponseStream); err != nil {
err := updateFinalCard(*a.ctx, "聊天失败", cardId, ifNewTopic)
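Both handlers in this file read the per-session mode via sessionCache.GetAIMode before calling Completions or StreamChat, and card_ai_mode_action.go stores the selection with SetAIMode. The following is a rough in-memory sketch of such a per-session store, assuming a plain map keyed by session id; the project's real SessionServiceCacheInterface may look different.

package services

import "sync"

// AIMode mirrors openai.AIMode: a plain float64 temperature value.
type AIMode = float64

const defaultMode AIMode = 1.2 // "标准" / Balance

// sessionCache is an illustrative in-memory store keyed by session id;
// it is an assumption, not the repository's actual cache implementation.
type sessionCache struct {
    mu    sync.RWMutex
    modes map[string]AIMode
}

func newSessionCache() *sessionCache {
    return &sessionCache{modes: map[string]AIMode{}}
}

// SetAIMode records the mode chosen on the card for one session.
func (c *sessionCache) SetAIMode(sessionId string, mode AIMode) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.modes[sessionId] = mode
}

// GetAIMode returns the stored mode, falling back to a default temperature.
func (c *sessionCache) GetAIMode(sessionId string) AIMode {
    c.mu.RLock()
    defer c.mu.RUnlock()
    if m, ok := c.modes[sessionId]; ok {
        return m
    }
    return defaultMode
}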
code/handlers/msg.go (3 additions, 3 deletions)
@@ -29,7 +29,7 @@ var (
PicVarMoreKind = CardKind("pic_var_more") // 变量图片
RoleTagsChooseKind = CardKind("role_tags_choose") // 内置角色所属标签选择
RoleChooseKind = CardKind("role_choose") // 内置角色选择
AIModeChooseKind = CardKind("ai_mode_choose") // AI模式选择
AIModeChooseKind = CardKind("ai_mode_choose") // 发散模式选择
)

var (
@@ -712,7 +712,7 @@ func sendHelpCard(ctx context.Context,
"sessionId": *sessionId,
}, larkcard.MessageCardButtonTypeDanger)),
withSplitLine(),
withMainMd("🤖 **AI模式选择** \n"+" 文本回复 *AI模式* 或 */ai_mode*"),
withMainMd("🤖 **发散模式选择** \n"+" 文本回复 *发散模式* 或 */ai_mode*"),
withSplitLine(),
withMainMd("🛖 **内置角色列表** \n"+" 文本回复 *角色列表* 或 */roles*"),
withSplitLine(),
@@ -810,7 +810,7 @@ func SendRoleListCard(ctx context.Context,
func SendAIModeListsCard(ctx context.Context,
sessionId *string, msgId *string, aiModeStrs []string) {
newCard, _ := newSendCard(
withHeader("🤖 AI模式选择", larkcard.TemplateIndigo),
withHeader("🤖 发散模式选择", larkcard.TemplateIndigo),
withAIModeBtn(sessionId, aiModeStrs),
withNote("提醒:选择内置模式,让AI更好的理解您的需求。"))
replyCard(ctx, msgId, newCard)
code/services/openai/gpt3.go (11 additions, 11 deletions)
@@ -12,23 +12,23 @@ type AIMode float64

const (
Fresh AIMode = 0.1
Warmth AIMode = 0.4
Balance AIMode = 0.7
Creativity AIMode = 1.0
Warmth AIMode = 0.7
Balance AIMode = 1.2
Creativity AIMode = 1.7
)

var AIModeMap = map[string]AIMode{
"清新": Fresh,
"温暖": Warmth,
"平衡": Balance,
"创意": Creativity,
"严谨": Fresh,
"简洁": Warmth,
"标准": Balance,
"发散": Creativity,
}

var AIModeStrs = []string{
"清新",
"温暖",
"平衡",
"创意",
"严谨",
"简洁",
"标准",
"发散",
}

type Messages struct {
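The gpt3.go hunk renames the four modes to 严谨 / 简洁 / 标准 / 发散 and raises their temperatures to 0.1 / 0.7 / 1.2 / 1.7. Below is a minimal sketch of how an AIMode value presumably ends up as the temperature field of a chat-completion request; the ChatRequest struct, the Messages fields, and the buildRequest helper are assumptions for illustration, not the project's actual Completions code.

package openai

// AIMode and its constants as defined in the diff above.
type AIMode float64

const (
    Fresh      AIMode = 0.1 // 严谨
    Warmth     AIMode = 0.7 // 简洁
    Balance    AIMode = 1.2 // 标准
    Creativity AIMode = 1.7 // 发散
)

// Messages is assumed to carry role/content fields; only its declaration
// is visible at the end of the hunk above.
type Messages struct {
    Role    string `json:"role"`
    Content string `json:"content"`
}

// ChatRequest is a hypothetical request body; the project defines its own.
type ChatRequest struct {
    Model       string     `json:"model"`
    Messages    []Messages `json:"messages"`
    Temperature float64    `json:"temperature"`
}

// buildRequest shows the key idea behind the "get ai mode as temperature"
// comment: the mode selected on the card is passed through unchanged as
// the OpenAI temperature parameter.
func buildRequest(msgs []Messages, mode AIMode) ChatRequest {
    return ChatRequest{
        Model:       "gpt-3.5-turbo",
        Messages:    msgs,
        Temperature: float64(mode),
    }
}

With the new map, selecting 发散 sends temperature 1.7, noticeably more random than the previous maximum of 1.0 used by the old 创意 mode.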