anything-llm/server/utils/AiProviders/localAi/index.js
Timothy Carambat 4bb99ab4bf
Support LocalAi as LLM provider by @tlandenberger (#373)
* feature: add LocalAI as llm provider

* update Onboarding/mgmt settings
Grab models from models endpoint for localai
merge with master

* update streaming for complete chunk streaming
update localAI LLM to be able to stream

* force schema on URL

---------
Co-authored-by: timothycarambat <rambat1010@gmail.com>
Co-authored-by: tlandenberger <tobiaslandenberger@gmail.com>
2023-11-14 12:31:44 -08:00


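// LocalAI LLM provider for AnythingLLM. Reads its configuration from the
// environment variables used below:
//   LOCAL_AI_BASE_PATH         - base URL of the LocalAI server (required)
//   LOCAL_AI_MODEL_PREF        - model name to use for chat completions
//   LOCAL_AI_MODEL_TOKEN_LIMIT - context window size in tokens (defaults to 4096)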
const { chatPrompt } = require("../../chats");

class LocalAiLLM {
  constructor(embedder = null) {
    if (!process.env.LOCAL_AI_BASE_PATH)
      throw new Error("No LocalAI Base Path was set.");

    const { Configuration, OpenAIApi } = require("openai");
    const config = new Configuration({
      basePath: process.env.LOCAL_AI_BASE_PATH,
    });
    this.openai = new OpenAIApi(config);
    this.model = process.env.LOCAL_AI_MODEL_PREF;
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    if (!embedder)
      throw new Error(
        "INVALID LOCAL AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LocalAI as your LLM."
      );
    this.embedder = embedder;
  }

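  // Streaming is supported when both streamChat and streamGetChatCompletion exist on this class.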
  streamingEnabled() {
    return "streamChat" in this && "streamGetChatCompletion" in this;
  }

  // Ensure the user set a value for the token limit;
  // if undefined, assume a 4096 token window.
  promptWindowLimit() {
    const limit = process.env.LOCAL_AI_MODEL_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No LocalAI token context limit was set.");
    return Number(limit);
  }

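  // LocalAI serves whichever models are loaded on the server, so any model name is accepted here.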
  async isValidChatCompletionModel(_ = "") {
    return true;
  }

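  // Build the message array: a system message containing the prompt and inlined
  // context chunks, followed by prior chat history and the new user prompt.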
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}
Context:
    ${contextTexts
      .map((text, i) => {
        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
      })
      .join("")}`,
    };

    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
  }

  async isSafe(_input = "") {
    // Not implemented so must be stubbed
    return { safe: true, reasons: [] };
  }

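  // Non-streaming chat: compress the prompt to fit the context window, then
  // request a single completion from LocalAI.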
  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `LocalAI chat: ${this.model} is not valid for chat completion!`
      );

    const textResponse = await this.openai
      .createChatCompletion({
        model: this.model,
        temperature: Number(workspace?.openAiTemp ?? 0.7),
        n: 1,
        messages: await this.compressMessages(
          {
            systemPrompt: chatPrompt(workspace),
            userPrompt: prompt,
            chatHistory,
          },
          rawHistory
        ),
      })
      .then((json) => {
        const res = json.data;
        if (!res.hasOwnProperty("choices"))
          throw new Error("LocalAI chat: No results!");
        if (res.choices.length === 0)
          throw new Error("LocalAI chat: No results length!");
        return res.choices[0].message.content;
      })
      .catch((error) => {
        throw new Error(
          `LocalAI::createChatCompletion failed with: ${error.message}`
        );
      });

    return textResponse;
  }

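  // Streaming variant of sendChat. Returns the raw streaming response so the
  // caller can forward chunks to the client as they arrive.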
  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `LocalAI chat: ${this.model} is not valid for chat completion!`
      );

    const streamRequest = await this.openai.createChatCompletion(
      {
        model: this.model,
        stream: true,
        temperature: Number(workspace?.openAiTemp ?? 0.7),
        n: 1,
        messages: await this.compressMessages(
          {
            systemPrompt: chatPrompt(workspace),
            userPrompt: prompt,
            chatHistory,
          },
          rawHistory
        ),
      },
      { responseType: "stream" }
    );
    return streamRequest;
  }

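  // Chat completion for an already-constructed message array (no compression applied here).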
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `LocalAI chat: ${this.model} is not valid for chat completion!`
      );

    const { data } = await this.openai.createChatCompletion({
      model: this.model,
      messages,
      temperature,
    });

    if (!data.hasOwnProperty("choices")) return null;
    return data.choices[0].message.content;
  }

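  // Streaming variant of getChatCompletion.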
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `LocalAI chat: ${this.model} is not valid for chat completion!`
      );

    const streamRequest = await this.openai.createChatCompletion(
      {
        model: this.model,
        stream: true,
        messages,
        temperature,
      },
      { responseType: "stream" }
    );
    return streamRequest;
  }

  // Simple wrappers around the dynamic embedder to normalize the interface
  // across all LLM implementations.
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }

  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

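  // Compress the constructed prompt so it fits within the context window limits
  // defined in the constructor.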
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}

module.exports = {
  LocalAiLLM,
};
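
For reference, a minimal sketch of how this provider might be exercised on its own. The base path, model name, and stub embedder below are illustrative assumptions and not part of AnythingLLM:

// Sketch only: assumes a LocalAI server reachable at the base path below.
process.env.LOCAL_AI_BASE_PATH = "http://localhost:8080/v1"; // assumed URL
process.env.LOCAL_AI_MODEL_PREF = "llama-2-7b-chat"; // assumed model name
process.env.LOCAL_AI_MODEL_TOKEN_LIMIT = "4096";

const { LocalAiLLM } = require("./index.js");

// Hypothetical stub satisfying the embedder interface the constructor requires.
const stubEmbedder = {
  embedTextInput: async (_text) => [0, 0, 0],
  embedChunks: async (chunks = []) => chunks.map(() => [0, 0, 0]),
};

(async () => {
  const llm = new LocalAiLLM(stubEmbedder);
  const messages = llm.constructPrompt({
    systemPrompt: "You are a helpful assistant.",
    contextTexts: ["LocalAI exposes an OpenAI-compatible API."],
    chatHistory: [],
    userPrompt: "What does LocalAI do?",
  });
  const reply = await llm.getChatCompletion(messages, { temperature: 0.7 });
  console.log(reply);
})();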