// anything-llm/server/utils/chats/index.js
const { v4: uuidv4 } = require("uuid");
const { OpenAi } = require("../openAi");
const { WorkspaceChats } = require("../../models/workspaceChats");
const { resetMemory } = require("./commands/reset");
const moment = require("moment");
const { getVectorDbClass } = require("../helpers");
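
/**
 * Converts stored workspace chat records into the paired user/assistant
 * message format the frontend renders, flattened into a single array.
 * Timestamps come from each record's createdAt; sources are read from the
 * saved assistant response JSON.
 */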
function convertToChatHistory(history = []) {
  const formattedHistory = [];
  history.forEach((record) => {
    const { prompt, response, createdAt } = record;
    const data = JSON.parse(response);
    formattedHistory.push([
      {
        role: "user",
        content: prompt,
        sentAt: moment(createdAt).unix(),
      },
      {
        role: "assistant",
        content: data.text,
        sources: data.sources || [],
        sentAt: moment(createdAt).unix(),
      },
    ]);
  });
  return formattedHistory.flat();
}
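
/**
 * Same source data as convertToChatHistory, but stripped down to only the
 * role/content pairs needed to seed the LLM prompt with prior turns.
 */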
function convertToPromptHistory(history = []) {
  const formattedHistory = [];
  history.forEach((record) => {
    const { prompt, response } = record;
    const data = JSON.parse(response);
    formattedHistory.push([
      { role: "user", content: prompt },
      { role: "assistant", content: data.text },
    ]);
  });
  return formattedHistory.flat();
}
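
// Registry of supported slash commands and their handler functions.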
const VALID_COMMANDS = {
  "/reset": resetMemory,
};
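
/**
 * Checks whether an incoming message begins with one of the registered
 * slash commands (case-insensitive) and returns the matching command key,
 * or null when the message is a normal chat prompt.
 */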
function grepCommand(message) {
  const availableCommands = Object.keys(VALID_COMMANDS);
  for (let i = 0; i < availableCommands.length; i++) {
    const cmd = availableCommands[i];
    const re = new RegExp(`^(${cmd})`, "i");
    if (re.test(message)) {
      return cmd;
    }
  }
  return null;
}
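
/**
 * Main chat entrypoint for a workspace. In order, it:
 *   1. Short-circuits to a command handler if the message is a slash command.
 *   2. Aborts if the message fails the OpenAI moderation check.
 *   3. Falls back to a plain conversational completion when the workspace
 *      has no vectorized documents; otherwise runs the vector DB's
 *      chat/query flow and attaches cited sources.
 * Every successful exchange is persisted via WorkspaceChats.
 */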
async function chatWithWorkspace(workspace, message, chatMode = "query") {
  const uuid = uuidv4();
  const openai = new OpenAi();
  const VectorDb = getVectorDbClass();

  // Slash commands bypass the LLM entirely and run their handler.
  const command = grepCommand(message);
  if (!!command && Object.keys(VALID_COMMANDS).includes(command)) {
    return await VALID_COMMANDS[command](workspace, message, uuid);
  }

  // Refuse to process messages flagged by the moderation endpoint.
  const { safe, reasons = [] } = await openai.isSafe(message);
  if (!safe) {
    return {
      id: uuid,
      type: "abort",
      textResponse: null,
      sources: [],
      close: true,
      error: `This message was moderated and will not be allowed. Violations for ${reasons.join(
        ", "
      )} found.`,
    };
  }

  const hasVectorizedSpace = await VectorDb.hasNamespace(workspace.slug);
  if (!hasVectorizedSpace) {
    // No embedded documents yet - chat conversationally using prior history.
    const rawHistory = await WorkspaceChats.forWorkspace(workspace.id);
    const chatHistory = convertToPromptHistory(rawHistory);
    const response = await openai.sendChat(chatHistory, message, workspace);
    const data = { text: response, sources: [], type: "chat" };
    await WorkspaceChats.new({
      workspaceId: workspace.id,
      prompt: message,
      response: data,
    });
    return {
      id: uuid,
      type: "textResponse",
      textResponse: response,
      sources: [],
      close: true,
      error: null,
    };
  } else {
    // Documents are embedded - delegate to the vector DB's chat or query mode.
    const {
      response,
      sources,
      message: error,
    } = await VectorDb[chatMode]({
      namespace: workspace.slug,
      input: message,
      workspace,
    });
    if (!response) {
      return {
        id: uuid,
        type: "abort",
        textResponse: null,
        sources: [],
        close: true,
        error,
      };
    }

    const data = { text: response, sources, type: chatMode };
    await WorkspaceChats.new({
      workspaceId: workspace.id,
      prompt: message,
      response: data,
    });
    return {
      id: uuid,
      type: "textResponse",
      textResponse: response,
      sources,
      close: true,
      error,
    };
  }
}
module.exports = {
  convertToChatHistory,
  chatWithWorkspace,
};
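
// A minimal usage sketch, assuming an Express-style endpoint; the route,
// middleware, and Workspace lookup below are illustrative, not part of
// this module:
//
//   app.post("/workspace/:slug/chat", async (request, response) => {
//     const workspace = await Workspace.getBySlug(request.params.slug); // hypothetical lookup
//     const { message, mode = "query" } = request.body;
//     const result = await chatWithWorkspace(workspace, message, mode);
//     response.status(200).json(result);
//   });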