Mirror of https://github.com/Mintplex-Labs/anything-llm.git (synced 2024-11-11 01:10:11 +01:00)
[CHORE] Remove sendChat and streamChat in all LLM providers (#1260)
* remove sendChat and streamChat functions/references in all LLM providers

* remove unused imports

---------

Co-authored-by: timothycarambat <rambat1010@gmail.com>
This commit is contained in:
parent a156a1e58c
commit 9feaad79cc
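For orientation (not part of the diff itself): with sendChat and streamChat removed, chat always flows through the compressed-message entry points, and streaming capability is advertised by streamGetChatCompletion alone. A minimal sketch of the provider shape the hunks below converge on, using an assumed example class name:

  class ExampleProviderLLM {
    streamingEnabled() {
      // "streamChat" is no longer checked; only the completion-style streamer matters.
      return "streamGetChatCompletion" in this;
    }

    async getChatCompletion(messages = null, { temperature = 0.7 }) {
      // non-streaming completion over an already-compressed message array
    }

    async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
      // returns a stream for the caller to consume via the shared stream handlers
    }
  }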
@@ -1,5 +1,4 @@
 const { v4 } = require("uuid");
-const { chatPrompt } = require("../../chats");
 const {
   writeResponseChunk,
   clientAbortedHandler,
@@ -33,7 +32,7 @@ class AnthropicLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   promptWindowLimit() {
@@ -110,31 +109,6 @@ class AnthropicLLM {
     }
   }

-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!this.isValidChatCompletionModel(this.model))
-      throw new Error(
-        `Anthropic chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const messages = await this.compressMessages(
-      {
-        systemPrompt: chatPrompt(workspace),
-        userPrompt: prompt,
-        chatHistory,
-      },
-      rawHistory
-    );
-
-    const streamRequest = await this.anthropic.messages.stream({
-      model: this.model,
-      max_tokens: 4096,
-      system: messages[0].content, // Strip out the system message
-      messages: messages.slice(1), // Pop off the system message
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-    });
-    return streamRequest;
-  }
-
   async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!this.isValidChatCompletionModel(this.model))
       throw new Error(
@@ -1,5 +1,4 @@
 const { AzureOpenAiEmbedder } = require("../../EmbeddingEngines/azureOpenAi");
-const { chatPrompt } = require("../../chats");
 const {
   writeResponseChunk,
   clientAbortedHandler,
@@ -45,7 +44,7 @@ class AzureOpenAiLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   // Sure the user selected a proper value for the token limit
@@ -82,66 +81,6 @@ class AzureOpenAiLLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!this.model)
-      throw new Error(
-        "No OPEN_MODEL_PREF ENV defined. This must the name of a deployment on your Azure account for an LLM chat model like GPT-3.5."
-      );
-
-    const messages = await this.compressMessages(
-      {
-        systemPrompt: chatPrompt(workspace),
-        userPrompt: prompt,
-        chatHistory,
-      },
-      rawHistory
-    );
-    const textResponse = await this.openai
-      .getChatCompletions(this.model, messages, {
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-      })
-      .then((res) => {
-        if (!res.hasOwnProperty("choices"))
-          throw new Error("AzureOpenAI chat: No results!");
-        if (res.choices.length === 0)
-          throw new Error("AzureOpenAI chat: No results length!");
-        return res.choices[0].message.content;
-      })
-      .catch((error) => {
-        console.log(error);
-        throw new Error(
-          `AzureOpenAI::getChatCompletions failed with: ${error.message}`
-        );
-      });
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!this.model)
-      throw new Error(
-        "No OPEN_MODEL_PREF ENV defined. This must the name of a deployment on your Azure account for an LLM chat model like GPT-3.5."
-      );
-
-    const messages = await this.compressMessages(
-      {
-        systemPrompt: chatPrompt(workspace),
-        userPrompt: prompt,
-        chatHistory,
-      },
-      rawHistory
-    );
-    const stream = await this.openai.streamChatCompletions(
-      this.model,
-      messages,
-      {
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-      }
-    );
-    return stream;
-  }
-
   async getChatCompletion(messages = [], { temperature = 0.7 }) {
     if (!this.model)
       throw new Error(
@@ -1,4 +1,3 @@
-const { chatPrompt } = require("../../chats");
 const {
   writeResponseChunk,
   clientAbortedHandler,
@@ -48,7 +47,7 @@ class GeminiLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   promptWindowLimit() {
@@ -118,32 +117,6 @@ class GeminiLLM {
     return allMessages;
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!this.isValidChatCompletionModel(this.model))
-      throw new Error(
-        `Gemini chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const compressedHistory = await this.compressMessages(
-      {
-        systemPrompt: chatPrompt(workspace),
-        chatHistory,
-      },
-      rawHistory
-    );
-
-    const chatThread = this.gemini.startChat({
-      history: this.formatMessages(compressedHistory),
-    });
-    const result = await chatThread.sendMessage(prompt);
-    const response = result.response;
-    const responseText = response.text();
-
-    if (!responseText) throw new Error("Gemini: No response could be parsed.");
-
-    return responseText;
-  }
-
   async getChatCompletion(messages = [], _opts = {}) {
     if (!this.isValidChatCompletionModel(this.model))
       throw new Error(
@@ -165,30 +138,6 @@ class GeminiLLM {
     return responseText;
   }

-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!this.isValidChatCompletionModel(this.model))
-      throw new Error(
-        `Gemini chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const compressedHistory = await this.compressMessages(
-      {
-        systemPrompt: chatPrompt(workspace),
-        chatHistory,
-      },
-      rawHistory
-    );
-
-    const chatThread = this.gemini.startChat({
-      history: this.formatMessages(compressedHistory),
-    });
-    const responseStream = await chatThread.sendMessageStream(prompt);
-    if (!responseStream.stream)
-      throw new Error("Could not stream response stream from Gemini.");
-
-    return responseStream.stream;
-  }
-
   async streamGetChatCompletion(messages = [], _opts = {}) {
     if (!this.isValidChatCompletionModel(this.model))
       throw new Error(
@@ -1,5 +1,4 @@
 const { NativeEmbedder } = require("../../EmbeddingEngines/native");
-const { chatPrompt } = require("../../chats");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -53,7 +52,7 @@ class GenericOpenAiLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   // Ensure the user set a value for the token limit
@@ -89,55 +88,6 @@ class GenericOpenAiLLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    const textResponse = await this.openai.chat.completions
-      .create({
-        model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-        messages: await this.compressMessages(
-          {
-            systemPrompt: chatPrompt(workspace),
-            userPrompt: prompt,
-            chatHistory,
-          },
-          rawHistory
-        ),
-      })
-      .then((result) => {
-        if (!result.hasOwnProperty("choices"))
-          throw new Error("GenericOpenAI chat: No results!");
-        if (result.choices.length === 0)
-          throw new Error("GenericOpenAI chat: No results length!");
-        return result.choices[0].message.content;
-      })
-      .catch((error) => {
-        throw new Error(
-          `GenericOpenAI::createChatCompletion failed with: ${error.message}`
-        );
-      });
-
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    const streamRequest = await this.openai.chat.completions.create({
-      model: this.model,
-      stream: true,
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-      n: 1,
-      messages: await this.compressMessages(
-        {
-          systemPrompt: chatPrompt(workspace),
-          userPrompt: prompt,
-          chatHistory,
-        },
-        rawHistory
-      ),
-    });
-    return streamRequest;
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     const result = await this.openai.chat.completions
       .create({
@@ -1,5 +1,4 @@
 const { NativeEmbedder } = require("../../EmbeddingEngines/native");
-const { chatPrompt } = require("../../chats");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -38,7 +37,7 @@ class GroqLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   promptWindowLimit() {
@@ -91,65 +90,6 @@ class GroqLLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `Groq chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const textResponse = await this.openai.chat.completions
-      .create({
-        model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-        messages: await this.compressMessages(
-          {
-            systemPrompt: chatPrompt(workspace),
-            userPrompt: prompt,
-            chatHistory,
-          },
-          rawHistory
-        ),
-      })
-      .then((result) => {
-        if (!result.hasOwnProperty("choices"))
-          throw new Error("GroqAI chat: No results!");
-        if (result.choices.length === 0)
-          throw new Error("GroqAI chat: No results length!");
-        return result.choices[0].message.content;
-      })
-      .catch((error) => {
-        throw new Error(
-          `GroqAI::createChatCompletion failed with: ${error.message}`
-        );
-      });
-
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `GroqAI:streamChat: ${this.model} is not valid for chat completion!`
-      );
-
-    const streamRequest = await this.openai.chat.completions.create({
-      model: this.model,
-      stream: true,
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-      n: 1,
-      messages: await this.compressMessages(
-        {
-          systemPrompt: chatPrompt(workspace),
-          userPrompt: prompt,
-          chatHistory,
-        },
-        rawHistory
-      ),
-    });
-    return streamRequest;
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
@@ -1,6 +1,5 @@
 const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
-const { chatPrompt } = require("../../chats");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -48,7 +47,7 @@ class HuggingFaceLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   promptWindowLimit() {
@@ -90,55 +89,6 @@ class HuggingFaceLLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    const textResponse = await this.openai.chat.completions
-      .create({
-        model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-        messages: await this.compressMessages(
-          {
-            systemPrompt: chatPrompt(workspace),
-            userPrompt: prompt,
-            chatHistory,
-          },
-          rawHistory
-        ),
-      })
-      .then((result) => {
-        if (!result.hasOwnProperty("choices"))
-          throw new Error("HuggingFace chat: No results!");
-        if (result.choices.length === 0)
-          throw new Error("HuggingFace chat: No results length!");
-        return result.choices[0].message.content;
-      })
-      .catch((error) => {
-        throw new Error(
-          `HuggingFace::createChatCompletion failed with: ${error.message}`
-        );
-      });
-
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    const streamRequest = await this.openai.chat.completions.create({
-      model: this.model,
-      stream: true,
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-      n: 1,
-      messages: await this.compressMessages(
-        {
-          systemPrompt: chatPrompt(workspace),
-          userPrompt: prompt,
-          chatHistory,
-        },
-        rawHistory
-      ),
-    });
-    return streamRequest;
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     const result = await this.openai.createChatCompletion({
       model: this.model,
@@ -1,4 +1,3 @@
-const { chatPrompt } = require("../../chats");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -49,7 +48,7 @@ class LMStudioLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   // Ensure the user set a value for the token limit
@@ -85,65 +84,6 @@ class LMStudioLLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!this.model)
-      throw new Error(
-        `LMStudio chat: ${this.model} is not valid or defined for chat completion!`
-      );
-
-    const textResponse = await this.lmstudio.chat.completions
-      .create({
-        model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-        messages: await this.compressMessages(
-          {
-            systemPrompt: chatPrompt(workspace),
-            userPrompt: prompt,
-            chatHistory,
-          },
-          rawHistory
-        ),
-      })
-      .then((result) => {
-        if (!result.hasOwnProperty("choices"))
-          throw new Error("LMStudio chat: No results!");
-        if (result.choices.length === 0)
-          throw new Error("LMStudio chat: No results length!");
-        return result.choices[0].message.content;
-      })
-      .catch((error) => {
-        throw new Error(
-          `LMStudio::createChatCompletion failed with: ${error.message}`
-        );
-      });
-
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!this.model)
-      throw new Error(
-        `LMStudio chat: ${this.model} is not valid or defined for chat completion!`
-      );
-
-    const streamRequest = await this.lmstudio.chat.completions.create({
-      model: this.model,
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-      n: 1,
-      stream: true,
-      messages: await this.compressMessages(
-        {
-          systemPrompt: chatPrompt(workspace),
-          userPrompt: prompt,
-          chatHistory,
-        },
-        rawHistory
-      ),
-    });
-    return streamRequest;
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!this.model)
       throw new Error(
@@ -1,4 +1,3 @@
-const { chatPrompt } = require("../../chats");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -41,7 +40,7 @@ class LocalAiLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   // Ensure the user set a value for the token limit
@@ -75,65 +74,6 @@ class LocalAiLLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `LocalAI chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const textResponse = await this.openai.chat.completions
-      .create({
-        model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-        messages: await this.compressMessages(
-          {
-            systemPrompt: chatPrompt(workspace),
-            userPrompt: prompt,
-            chatHistory,
-          },
-          rawHistory
-        ),
-      })
-      .then((result) => {
-        if (!result.hasOwnProperty("choices"))
-          throw new Error("LocalAI chat: No results!");
-        if (result.choices.length === 0)
-          throw new Error("LocalAI chat: No results length!");
-        return result.choices[0].message.content;
-      })
-      .catch((error) => {
-        throw new Error(
-          `LocalAI::createChatCompletion failed with: ${error.message}`
-        );
-      });
-
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `LocalAI chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const streamRequest = await this.openai.chat.completions.create({
-      model: this.model,
-      stream: true,
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-      n: 1,
-      messages: await this.compressMessages(
-        {
-          systemPrompt: chatPrompt(workspace),
-          userPrompt: prompt,
-          chatHistory,
-        },
-        rawHistory
-      ),
-    });
-    return streamRequest;
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
@@ -1,4 +1,3 @@
-const { chatPrompt } = require("../../chats");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -42,7 +41,7 @@ class MistralLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   promptWindowLimit() {
@@ -70,64 +69,6 @@ class MistralLLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `Mistral chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const textResponse = await this.openai.chat.completions
-      .create({
-        model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        messages: await this.compressMessages(
-          {
-            systemPrompt: chatPrompt(workspace),
-            userPrompt: prompt,
-            chatHistory,
-          },
-          rawHistory
-        ),
-      })
-      .then((result) => {
-        if (!result.hasOwnProperty("choices"))
-          throw new Error("Mistral chat: No results!");
-        if (result.choices.length === 0)
-          throw new Error("Mistral chat: No results length!");
-        return result.choices[0].message.content;
-      })
-      .catch((error) => {
-        throw new Error(
-          `Mistral::createChatCompletion failed with: ${error.message}`
-        );
-      });
-
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `Mistral chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const streamRequest = await this.openai.chat.completions.create({
-      model: this.model,
-      stream: true,
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-      messages: await this.compressMessages(
-        {
-          systemPrompt: chatPrompt(workspace),
-          userPrompt: prompt,
-          chatHistory,
-        },
-        rawHistory
-      ),
-    });
-
-    return streamRequest;
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
@@ -1,7 +1,6 @@
 const fs = require("fs");
 const path = require("path");
 const { NativeEmbedder } = require("../../EmbeddingEngines/native");
-const { chatPrompt } = require("../../chats");
 const {
   writeResponseChunk,
   clientAbortedHandler,
@@ -94,7 +93,7 @@ class NativeLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   // Ensure the user set a value for the token limit
@@ -123,45 +122,6 @@ class NativeLLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    try {
-      const messages = await this.compressMessages(
-        {
-          systemPrompt: chatPrompt(workspace),
-          userPrompt: prompt,
-          chatHistory,
-        },
-        rawHistory
-      );
-
-      const model = await this.#llamaClient({
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-      });
-      const response = await model.call(messages);
-      return response.content;
-    } catch (error) {
-      throw new Error(
-        `NativeLLM::createChatCompletion failed with: ${error.message}`
-      );
-    }
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    const model = await this.#llamaClient({
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-    });
-    const messages = await this.compressMessages(
-      {
-        systemPrompt: chatPrompt(workspace),
-        userPrompt: prompt,
-        chatHistory,
-      },
-      rawHistory
-    );
-    const responseStream = await model.stream(messages);
-    return responseStream;
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     const model = await this.#llamaClient({ temperature });
     const response = await model.call(messages);
@@ -1,4 +1,3 @@
-const { chatPrompt } = require("../../chats");
 const { StringOutputParser } = require("@langchain/core/output_parsers");
 const {
   writeResponseChunk,
@@ -74,7 +73,7 @@ class OllamaAILLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   // Ensure the user set a value for the token limit
@@ -108,53 +107,6 @@ class OllamaAILLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    const messages = await this.compressMessages(
-      {
-        systemPrompt: chatPrompt(workspace),
-        userPrompt: prompt,
-        chatHistory,
-      },
-      rawHistory
-    );
-
-    const model = this.#ollamaClient({
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-    });
-    const textResponse = await model
-      .pipe(new StringOutputParser())
-      .invoke(this.#convertToLangchainPrototypes(messages))
-      .catch((e) => {
-        throw new Error(
-          `Ollama::getChatCompletion failed to communicate with Ollama. ${e.message}`
-        );
-      });
-
-    if (!textResponse || !textResponse.length)
-      throw new Error(`Ollama::sendChat text response was empty.`);
-
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    const messages = await this.compressMessages(
-      {
-        systemPrompt: chatPrompt(workspace),
-        userPrompt: prompt,
-        chatHistory,
-      },
-      rawHistory
-    );
-
-    const model = this.#ollamaClient({
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-    });
-    const stream = await model
-      .pipe(new StringOutputParser())
-      .stream(this.#convertToLangchainPrototypes(messages));
-    return stream;
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     const model = this.#ollamaClient({ temperature });
     const textResponse = await model
@@ -1,5 +1,4 @@
 const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
-const { chatPrompt } = require("../../chats");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -41,7 +40,7 @@ class OpenAiLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   promptWindowLimit() {
@@ -122,65 +121,6 @@ class OpenAiLLM {
     return { safe: false, reasons };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `OpenAI chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const textResponse = await this.openai.chat.completions
-      .create({
-        model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-        messages: await this.compressMessages(
-          {
-            systemPrompt: chatPrompt(workspace),
-            userPrompt: prompt,
-            chatHistory,
-          },
-          rawHistory
-        ),
-      })
-      .then((result) => {
-        if (!result.hasOwnProperty("choices"))
-          throw new Error("OpenAI chat: No results!");
-        if (result.choices.length === 0)
-          throw new Error("OpenAI chat: No results length!");
-        return result.choices[0].message.content;
-      })
-      .catch((error) => {
-        throw new Error(
-          `OpenAI::createChatCompletion failed with: ${error.message}`
-        );
-      });
-
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `OpenAI chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const streamRequest = await this.openai.chat.completions({
-      model: this.model,
-      stream: true,
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-      n: 1,
-      messages: await this.compressMessages(
-        {
-          systemPrompt: chatPrompt(workspace),
-          userPrompt: prompt,
-          chatHistory,
-        },
-        rawHistory
-      ),
-    });
-    return streamRequest;
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
@@ -1,10 +1,8 @@
 const { NativeEmbedder } = require("../../EmbeddingEngines/native");
-const { chatPrompt } = require("../../chats");
-const { v4: uuidv4 } = require("uuid");
 const {
   writeResponseChunk,
   clientAbortedHandler,
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
 const fs = require("fs");
 const path = require("path");
@@ -99,7 +97,7 @@ class OpenRouterLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   promptWindowLimit() {
@@ -131,65 +129,6 @@ class OpenRouterLLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `OpenRouter chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const textResponse = await this.openai.chat.completions
-      .create({
-        model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-        messages: await this.compressMessages(
-          {
-            systemPrompt: chatPrompt(workspace),
-            userPrompt: prompt,
-            chatHistory,
-          },
-          rawHistory
-        ),
-      })
-      .then((result) => {
-        if (!result.hasOwnProperty("choices"))
-          throw new Error("OpenRouter chat: No results!");
-        if (result.choices.length === 0)
-          throw new Error("OpenRouter chat: No results length!");
-        return result.choices[0].message.content;
-      })
-      .catch((error) => {
-        throw new Error(
-          `OpenRouter::createChatCompletion failed with: ${error.message}`
-        );
-      });
-
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `OpenRouter chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const streamRequest = await this.openai.chat.completions.create({
-      model: this.model,
-      stream: true,
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-      n: 1,
-      messages: await this.compressMessages(
-        {
-          systemPrompt: chatPrompt(workspace),
-          userPrompt: prompt,
-          chatHistory,
-        },
-        rawHistory
-      ),
-    });
-    return streamRequest;
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
@@ -304,143 +243,6 @@ class OpenRouterLLM {
     });
   }

-  // handleStream(response, stream, responseProps) {
-  //   const timeoutThresholdMs = 500;
-  //   const { uuid = uuidv4(), sources = [] } = responseProps;
-
-  //   return new Promise((resolve) => {
-  //     let fullText = "";
-  //     let chunk = "";
-  //     let lastChunkTime = null; // null when first token is still not received.
-
-  //     // Establish listener to early-abort a streaming response
-  //     // in case things go sideways or the user does not like the response.
-  //     // We preserve the generated text but continue as if chat was completed
-  //     // to preserve previously generated content.
-  //     const handleAbort = () => clientAbortedHandler(resolve, fullText);
-  //     response.on("close", handleAbort);
-
-  //     // NOTICE: Not all OpenRouter models will return a stop reason
-  //     // which keeps the connection open and so the model never finalizes the stream
-  //     // like the traditional OpenAI response schema does. So in the case the response stream
-  //     // never reaches a formal close state we maintain an interval timer that if we go >=timeoutThresholdMs with
-  //     // no new chunks then we kill the stream and assume it to be complete. OpenRouter is quite fast
-  //     // so this threshold should permit most responses, but we can adjust `timeoutThresholdMs` if
-  //     // we find it is too aggressive.
-  //     const timeoutCheck = setInterval(() => {
-  //       if (lastChunkTime === null) return;
-
-  //       const now = Number(new Date());
-  //       const diffMs = now - lastChunkTime;
-  //       if (diffMs >= timeoutThresholdMs) {
-  //         console.log(
-  //           `OpenRouter stream did not self-close and has been stale for >${timeoutThresholdMs}ms. Closing response stream.`
-  //         );
-  //         writeResponseChunk(response, {
-  //           uuid,
-  //           sources,
-  //           type: "textResponseChunk",
-  //           textResponse: "",
-  //           close: true,
-  //           error: false,
-  //         });
-  //         clearInterval(timeoutCheck);
-  //         response.removeListener("close", handleAbort);
-  //         resolve(fullText);
-  //       }
-  //     }, 500);
-
-  //     stream.data.on("data", (data) => {
-  //       const lines = data
-  //         ?.toString()
-  //         ?.split("\n")
-  //         .filter((line) => line.trim() !== "");
-
-  //       for (const line of lines) {
-  //         let validJSON = false;
-  //         const message = chunk + line.replace(/^data: /, "");
-
-  //         // JSON chunk is incomplete and has not ended yet
-  //         // so we need to stitch it together. You would think JSON
-  //         // chunks would only come complete - but they don't!
-  //         try {
-  //           JSON.parse(message);
-  //           validJSON = true;
-  //         } catch { }
-
-  //         if (!validJSON) {
-  //           // It can be possible that the chunk decoding is running away
-  //           // and the message chunk fails to append due to string length.
-  //           // In this case abort the chunk and reset so we can continue.
-  //           // ref: https://github.com/Mintplex-Labs/anything-llm/issues/416
-  //           try {
-  //             chunk += message;
-  //           } catch (e) {
-  //             console.error(`Chunk appending error`, e);
-  //             chunk = "";
-  //           }
-  //           continue;
-  //         } else {
-  //           chunk = "";
-  //         }
-
-  //         if (message == "[DONE]") {
-  //           lastChunkTime = Number(new Date());
-  //           writeResponseChunk(response, {
-  //             uuid,
-  //             sources,
-  //             type: "textResponseChunk",
-  //             textResponse: "",
-  //             close: true,
-  //             error: false,
-  //           });
-  //           clearInterval(timeoutCheck);
-  //           response.removeListener("close", handleAbort);
-  //           resolve(fullText);
-  //         } else {
-  //           let finishReason = null;
-  //           let token = "";
-  //           try {
-  //             const json = JSON.parse(message);
-  //             token = json?.choices?.[0]?.delta?.content;
-  //             finishReason = json?.choices?.[0]?.finish_reason || null;
-  //           } catch {
-  //             continue;
-  //           }
-
-  //           if (token) {
-  //             fullText += token;
-  //             lastChunkTime = Number(new Date());
-  //             writeResponseChunk(response, {
-  //               uuid,
-  //               sources: [],
-  //               type: "textResponseChunk",
-  //               textResponse: token,
-  //               close: false,
-  //               error: false,
-  //             });
-  //           }
-
-  //           if (finishReason !== null) {
-  //             lastChunkTime = Number(new Date());
-  //             writeResponseChunk(response, {
-  //               uuid,
-  //               sources,
-  //               type: "textResponseChunk",
-  //               textResponse: "",
-  //               close: true,
-  //               error: false,
-  //             });
-  //             clearInterval(timeoutCheck);
-  //             response.removeListener("close", handleAbort);
-  //             resolve(fullText);
-  //           }
-  //         }
-  //       }
-  //     });
-  //   });
-  // }
-
   // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
   async embedTextInput(textInput) {
     return await this.embedder.embedTextInput(textInput);
@@ -1,5 +1,4 @@
 const { NativeEmbedder } = require("../../EmbeddingEngines/native");
-const { chatPrompt } = require("../../chats");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -50,7 +49,7 @@ class PerplexityLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   promptWindowLimit() {
@@ -81,65 +80,6 @@ class PerplexityLLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `Perplexity chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const textResponse = await this.openai.chat.completions
-      .create({
-        model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-        messages: await this.compressMessages(
-          {
-            systemPrompt: chatPrompt(workspace),
-            userPrompt: prompt,
-            chatHistory,
-          },
-          rawHistory
-        ),
-      })
-      .then((result) => {
-        if (!result.hasOwnProperty("choices"))
-          throw new Error("Perplexity chat: No results!");
-        if (result.choices.length === 0)
-          throw new Error("Perplexity chat: No results length!");
-        return result.choices[0].message.content;
-      })
-      .catch((error) => {
-        throw new Error(
-          `Perplexity::createChatCompletion failed with: ${error.message}`
-        );
-      });
-
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `Perplexity chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const streamRequest = await this.openai.chat.completions.create({
-      model: this.model,
-      stream: true,
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-      n: 1,
-      messages: await this.compressMessages(
-        {
-          systemPrompt: chatPrompt(workspace),
-          userPrompt: prompt,
-          chatHistory,
-        },
-        rawHistory
-      ),
-    });
-    return streamRequest;
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
@@ -1,4 +1,3 @@
-const { chatPrompt } = require("../../chats");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -49,7 +48,7 @@ class TogetherAiLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   // Ensure the user set a value for the token limit
@@ -82,65 +81,6 @@ class TogetherAiLLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `Together AI chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const textResponse = await this.openai.chat.completions
-      .create({
-        model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-        messages: await this.compressMessages(
-          {
-            systemPrompt: chatPrompt(workspace),
-            userPrompt: prompt,
-            chatHistory,
-          },
-          rawHistory
-        ),
-      })
-      .then((result) => {
-        if (!result.hasOwnProperty("choices"))
-          throw new Error("Together AI chat: No results!");
-        if (result.choices.length === 0)
-          throw new Error("Together AI chat: No results length!");
-        return result.choices[0].message.content;
-      })
-      .catch((error) => {
-        throw new Error(
-          `TogetherAI::createChatCompletion failed with: ${error.message}`
-        );
-      });
-
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `TogetherAI chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const streamRequest = await this.openai.chat.completions.create({
-      model: this.model,
-      stream: true,
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-      n: 1,
-      messages: await this.compressMessages(
-        {
-          systemPrompt: chatPrompt(workspace),
-          userPrompt: prompt,
-          chatHistory,
-        },
-        rawHistory
-      ),
-    });
-    return streamRequest;
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(