From 8743be679bee9eda1b1b8cac7fb07eb73264a125 Mon Sep 17 00:00:00 2001
From: Timothy Carambat
Date: Mon, 13 Nov 2023 15:17:22 -0800
Subject: [PATCH] assume default model where appropriate (#366)

* assume default model where appropriate

* merge with master and fix other model refs
---
 server/utils/AiProviders/anthropic/index.js |  2 +-
 server/utils/AiProviders/lmStudio/index.js  |  4 ++--
 server/utils/AiProviders/openAi/index.js    | 16 +++++++---------
 3 files changed, 10 insertions(+), 12 deletions(-)

diff --git a/server/utils/AiProviders/anthropic/index.js b/server/utils/AiProviders/anthropic/index.js
index 703c0859a..709333231 100644
--- a/server/utils/AiProviders/anthropic/index.js
+++ b/server/utils/AiProviders/anthropic/index.js
@@ -12,7 +12,7 @@ class AnthropicLLM {
       apiKey: process.env.ANTHROPIC_API_KEY,
     });
     this.anthropic = anthropic;
-    this.model = process.env.ANTHROPIC_MODEL_PREF;
+    this.model = process.env.ANTHROPIC_MODEL_PREF || "claude-2";
     this.limits = {
       history: this.promptWindowLimit() * 0.15,
       system: this.promptWindowLimit() * 0.15,
diff --git a/server/utils/AiProviders/lmStudio/index.js b/server/utils/AiProviders/lmStudio/index.js
index e0ccc316e..4d9770e66 100644
--- a/server/utils/AiProviders/lmStudio/index.js
+++ b/server/utils/AiProviders/lmStudio/index.js
@@ -73,7 +73,7 @@ Context:
   async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
     if (!this.model)
       throw new Error(
-        `LMStudio chat: ${model} is not valid or defined for chat completion!`
+        `LMStudio chat: ${this.model} is not valid or defined for chat completion!`
       );
 
     const textResponse = await this.lmstudio
@@ -110,7 +110,7 @@ Context:
   async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
     if (!this.model)
       throw new Error(
-        `LMStudio chat: ${model} is not valid or defined for chat completion!`
+        `LMStudio chat: ${this.model} is not valid or defined for chat completion!`
       );
 
     const streamRequest = await this.lmstudio.createChatCompletion(
diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js
index 33ed1b198..0c5b7116d 100644
--- a/server/utils/AiProviders/openAi/index.js
+++ b/server/utils/AiProviders/openAi/index.js
@@ -11,7 +11,7 @@ class OpenAiLLM extends OpenAiEmbedder {
       apiKey: process.env.OPEN_AI_KEY,
     });
     this.openai = new OpenAIApi(config);
-    this.model = process.env.OPEN_MODEL_PREF;
+    this.model = process.env.OPEN_MODEL_PREF || "gpt-3.5-turbo";
     this.limits = {
       history: this.promptWindowLimit() * 0.15,
       system: this.promptWindowLimit() * 0.15,
@@ -107,15 +107,14 @@ Context:
   }
 
   async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    const model = process.env.OPEN_MODEL_PREF;
-    if (!(await this.isValidChatCompletionModel(model)))
+    if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
-        `OpenAI chat: ${model} is not valid for chat completion!`
+        `OpenAI chat: ${this.model} is not valid for chat completion!`
       );
 
     const textResponse = await this.openai
       .createChatCompletion({
-        model,
+        model: this.model,
         temperature: Number(workspace?.openAiTemp ?? 0.7),
         n: 1,
         messages: await this.compressMessages(
@@ -145,15 +144,14 @@ Context:
   }
 
   async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    const model = process.env.OPEN_MODEL_PREF;
-    if (!(await this.isValidChatCompletionModel(model)))
+    if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
-        `OpenAI chat: ${model} is not valid for chat completion!`
+        `OpenAI chat: ${this.model} is not valid for chat completion!`
       );
 
     const streamRequest = await this.openai.createChatCompletion(
       {
-        model,
+        model: this.model,
         stream: true,
         temperature: Number(workspace?.openAiTemp ?? 0.7),
         n: 1,