assume default model where appropriate (#366)

* assume default model where appropriate

* merge with master and fix other model refs
This commit is contained in:
Timothy Carambat 2023-11-13 15:17:22 -08:00 committed by GitHub
parent c22c50cca8
commit 8743be679b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 10 additions and 12 deletions

View File

@@ -12,7 +12,7 @@ class AnthropicLLM {
       apiKey: process.env.ANTHROPIC_API_KEY,
     });
     this.anthropic = anthropic;
-    this.model = process.env.ANTHROPIC_MODEL_PREF;
+    this.model = process.env.ANTHROPIC_MODEL_PREF || "claude-2";
     this.limits = {
       history: this.promptWindowLimit() * 0.15,
       system: this.promptWindowLimit() * 0.15,

View File

@@ -73,7 +73,7 @@ Context:
   async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
     if (!this.model)
       throw new Error(
-        `LMStudio chat: ${model} is not valid or defined for chat completion!`
+        `LMStudio chat: ${this.model} is not valid or defined for chat completion!`
       );
     const textResponse = await this.lmstudio
@@ -110,7 +110,7 @@ Context:
   async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
     if (!this.model)
       throw new Error(
-        `LMStudio chat: ${model} is not valid or defined for chat completion!`
+        `LMStudio chat: ${this.model} is not valid or defined for chat completion!`
       );
     const streamRequest = await this.lmstudio.createChatCompletion(

View File

@@ -11,7 +11,7 @@ class OpenAiLLM extends OpenAiEmbedder {
       apiKey: process.env.OPEN_AI_KEY,
     });
     this.openai = new OpenAIApi(config);
-    this.model = process.env.OPEN_MODEL_PREF;
+    this.model = process.env.OPEN_MODEL_PREF || "gpt-3.5-turbo";
     this.limits = {
       history: this.promptWindowLimit() * 0.15,
       system: this.promptWindowLimit() * 0.15,
@@ -107,15 +107,14 @@ Context:
   }
   async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    const model = process.env.OPEN_MODEL_PREF;
-    if (!(await this.isValidChatCompletionModel(model)))
+    if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
-        `OpenAI chat: ${model} is not valid for chat completion!`
+        `OpenAI chat: ${this.model} is not valid for chat completion!`
       );
     const textResponse = await this.openai
       .createChatCompletion({
-        model,
+        model: this.model,
         temperature: Number(workspace?.openAiTemp ?? 0.7),
         n: 1,
         messages: await this.compressMessages(
@@ -145,15 +144,14 @@ Context:
   }
   async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    const model = process.env.OPEN_MODEL_PREF;
-    if (!(await this.isValidChatCompletionModel(model)))
+    if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
-        `OpenAI chat: ${model} is not valid for chat completion!`
+        `OpenAI chat: ${this.model} is not valid for chat completion!`
       );
     const streamRequest = await this.openai.createChatCompletion(
       {
-        model,
+        model: this.model,
         stream: true,
         temperature: Number(workspace?.openAiTemp ?? 0.7),
         n: 1,