diff --git a/app/vscode/asset/package.json b/app/vscode/asset/package.json
index c36b654..5768bbb 100644
--- a/app/vscode/asset/package.json
+++ b/app/vscode/asset/package.json
@@ -221,19 +221,23 @@
         },
         "rubberduck.model": {
           "type": "string",
-          "default": "gpt-3.5-turbo-16k",
+          "default": "gpt-3.5-turbo-1106",
           "enum": [
             "gpt-3.5-turbo",
             "gpt-3.5-turbo-16k",
+            "gpt-3.5-turbo-1106",
             "gpt-4",
             "gpt-4-32k",
+            "gpt-4-1106-preview",
             "llama.cpp"
           ],
           "enumDescriptions": [
             "OpenAI GPT-3.5-turbo: 4k context window. Faster, less expensive model. Less accurate.",
             "OpenAI GPT-3.5-turbo: 16k context window. Faster, less expensive model. Less accurate.",
+            "OpenAI GPT-3.5-turbo: 16k context window. Latest 3.5 model. Faster, less expensive. Less accurate.",
             "OpenAI GPT-4: 8k context window. Expensive, slow model. More accurate.",
-            "OpenAI GPT-4: 32k context window.Expensive, slow model. More accurate. Requires beta access.",
+            "OpenAI GPT-4: 32k context window. Expensive, slow model. More accurate.",
+            "OpenAI GPT-4: 128k context window. Latest model. Expensive (but cheaper than 32k), slow model. More accurate.",
             "(Experimental) Llama.cpp: Calls Llama.cpp running locally on http://127.0.0.1:8080. Use for local models with Llama 2 prompt format."
           ],
           "markdownDescription": "Select the OpenAI model that you want to use.",
diff --git a/lib/extension/src/ai/AIClient.ts b/lib/extension/src/ai/AIClient.ts
index e506b81..a47a1a0 100644
--- a/lib/extension/src/ai/AIClient.ts
+++ b/lib/extension/src/ai/AIClient.ts
@@ -29,8 +29,10 @@ function getModel() {
     .enum([
       "gpt-4",
       "gpt-4-32k",
+      "gpt-4-1106-preview",
       "gpt-3.5-turbo",
       "gpt-3.5-turbo-16k",
+      "gpt-3.5-turbo-1106",
       "llama.cpp",
     ])
     .parse(vscode.workspace.getConfiguration("rubberduck").get("model"));
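
For context on how the new enum values take effect at runtime: the `getModel()` chain in `AIClient.ts` validates the user's `rubberduck.model` setting against the allowed list, so both hunks must stay in sync. Below is a minimal standalone sketch of that validation, assuming the chain is built on zod (which the `.enum([...]).parse(...)` shape suggests); the `modelSchema` name is illustrative, not the extension's actual identifier.

```ts
import { z } from "zod";
import * as vscode from "vscode";

// Illustrative schema mirroring the updated enum in AIClient.ts.
const modelSchema = z.enum([
  "gpt-4",
  "gpt-4-32k",
  "gpt-4-1106-preview",
  "gpt-3.5-turbo",
  "gpt-3.5-turbo-16k",
  "gpt-3.5-turbo-1106",
  "llama.cpp",
]);

function getModel() {
  // parse() throws a ZodError if the configured value is not one of the
  // enum members, so a stale or mistyped "rubberduck.model" setting fails
  // loudly instead of being sent to the API.
  return modelSchema.parse(
    vscode.workspace.getConfiguration("rubberduck").get("model")
  );
}
```

This also explains why the `package.json` `enum` matters beyond the settings UI dropdown: a value accepted there but missing from the zod enum (or vice versa) would either be rejected at runtime or be unselectable in settings.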