From a89d85edf6c02e4f3e8500c5245a5b3e33027a63 Mon Sep 17 00:00:00 2001 From: rpggio Date: Thu, 8 Feb 2024 17:30:14 -0800 Subject: [PATCH] GPT model list --- manifest.json | 2 +- package.json | 2 +- src/noteGenerator.ts | 2 +- src/openai/chatGPT.ts | 58 +++++++++++++++++++++--------- src/settings/ChatStreamSettings.ts | 2 +- versions.json | 3 +- 6 files changed, 47 insertions(+), 22 deletions(-) diff --git a/manifest.json b/manifest.json index ee34a7c..aaae370 100644 --- a/manifest.json +++ b/manifest.json @@ -1,7 +1,7 @@ { "id": "chat-stream", "name": "Chat Stream", - "version": "1.4.4", + "version": "1.4.5", "minAppVersion": "1.1.10", "description": "Create branching GPT chats using canvas notes.", "author": "Ryan P Smith", diff --git a/package.json b/package.json index 88ca326..235ed2b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "obsidian-chat-stream", - "version": "1.4.", + "version": "1.4.5", "description": "Create branching GPT chats using Obsidian canvas", "main": "src/main.js", "scripts": { diff --git a/src/noteGenerator.ts b/src/noteGenerator.ts index 9e4fede..d9a0784 100644 --- a/src/noteGenerator.ts +++ b/src/noteGenerator.ts @@ -276,7 +276,7 @@ export function noteGenerator( } function getTokenLimit(settings: ChatStreamSettings) { - const model = chatModelByName(settings.apiModel) || CHAT_MODELS.GPT35 + const model = chatModelByName(settings.apiModel) || CHAT_MODELS.GPT_35_TURBO_0125 return settings.maxInputTokens ? 
Math.min(settings.maxInputTokens, model.tokenLimit) : model.tokenLimit diff --git a/src/openai/chatGPT.ts b/src/openai/chatGPT.ts index c00bc0c..cac88dd 100644 --- a/src/openai/chatGPT.ts +++ b/src/openai/chatGPT.ts @@ -4,29 +4,53 @@ import { openai } from './chatGPT-types' export const OPENAI_COMPLETIONS_URL = `https://api.openai.com/v1/chat/completions` export const CHAT_MODELS = { - GPT35: { + GPT_35_TURBO: { name: 'gpt-3.5-turbo', tokenLimit: 4096 }, - GPT35_16K: { + GPT_35_TURBO_0125: { + name: 'gpt-3.5-turbo-0125', + tokenLimit: 16385 + }, + GPT_35_16K: { name: 'gpt-3.5-turbo-16k', - tokenLimit: 16384 + tokenLimit: 16385 + }, + GPT_35_TURBO_1106: { + name: 'gpt-3.5-turbo-1106', + tokenLimit: 16385 }, - GPT4: { + GPT_4: { name: 'gpt-4', - tokenLimit: 8000 + tokenLimit: 8192 + }, + GPT_4_TURBO_PREVIEW: { + name: 'gpt-4-turbo-preview', + tokenLimit: 128000 + }, + GPT_4_0125_PREVIEW: { + name: 'gpt-4-0125-preview', + tokenLimit: 128000 }, GPT_4_1106_PREVIEW: { name: 'gpt-4-1106-preview', tokenLimit: 128000 }, - GPT4_32K: { + GPT_4_0613: { + name: 'gpt-4-0613', + tokenLimit: 8192 + }, + GPT_4_32K: { name: 'gpt-4-32k', tokenLimit: 32768 + }, + GPT_4_32K_0613: { + name: 'gpt-4-32k-0613', + tokenLimit: 32768 } -} +} as const -export type ChatGPTModel = typeof CHAT_MODELS.GPT35 | typeof CHAT_MODELS.GPT4 +export type ChatGPTModel = keyof typeof CHAT_MODELS export type ChatGPTModelType = keyof typeof CHAT_MODELS @@ -35,15 +59,15 @@ export function chatModelByName(name: string) { } export const defaultChatGPTSettings: Partial<openai.CreateChatCompletionRequest> = - { - model: CHAT_MODELS.GPT35.name, - max_tokens: 500, - temperature: 0, - top_p: 1.0, - presence_penalty: 0, - frequency_penalty: 0, - stop: [] - } +{ + model: CHAT_MODELS.GPT_35_TURBO.name, + max_tokens: 500, + temperature: 0, + top_p: 1.0, + presence_penalty: 0, + frequency_penalty: 0, + stop: [] +} export async function getChatGPTCompletion( apiKey: string, diff --git a/src/settings/ChatStreamSettings.ts b/src/settings/ChatStreamSettings.ts 
index 1ffac5f..74a851b 100644 --- a/src/settings/ChatStreamSettings.ts +++ b/src/settings/ChatStreamSettings.ts @@ -58,7 +58,7 @@ Use step-by-step reasoning. Be brief. export const DEFAULT_SETTINGS: ChatStreamSettings = { apiKey: '', apiUrl: OPENAI_COMPLETIONS_URL, - apiModel: CHAT_MODELS.GPT35.name, + apiModel: CHAT_MODELS.GPT_35_TURBO.name, temperature: 1, systemPrompt: DEFAULT_SYSTEM_PROMPT, debug: false, diff --git a/versions.json b/versions.json index fd6da51..050d2fb 100644 --- a/versions.json +++ b/versions.json @@ -8,5 +8,6 @@ "1.3.2": "1.1.10", "1.3.3": "1.1.10", "1.4.3": "1.1.10", - "1.4.4": "1.1.10" + "1.4.4": "1.1.10", + "1.4.5": "1.1.10" }