anything-llm/server/utils/chats/index.js

const { v4: uuidv4 } = require("uuid");
const { WorkspaceChats } = require("../../models/workspaceChats");
const { resetMemory } = require("./commands/reset");
const { convertToPromptHistory } = require("../helpers/chat/responses");
const { SlashCommandPresets } = require("../../models/slashCommandsPresets");

const VALID_COMMANDS = {
  "/reset": resetMemory,
};
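
/**
 * Checks whether a chat message begins with a built-in slash command
 * (currently only "/reset") and returns that command if so. Otherwise,
 * expands any of the user's preset slash commands found in the message
 * into their stored prompts and returns the updated message.
 * @param {string} message - The raw chat message from the user.
 * @param {object|null} user - The user sending the message, if any.
 * @returns {Promise<string>} The matched built-in command or the expanded message.
 */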
async function grepCommand(message, user = null) {
  const userPresets = await SlashCommandPresets.getUserPresets(user?.id);
  const availableCommands = Object.keys(VALID_COMMANDS);

  // Check if the message starts with any built-in command
  for (let i = 0; i < availableCommands.length; i++) {
    const cmd = availableCommands[i];
    const re = new RegExp(`^(${cmd})`, "i");
    if (re.test(message)) {
      return cmd;
    }
  }

  // Replace all preset commands with their corresponding prompts.
  // Allows multiple commands in one message.
  let updatedMessage = message;
  for (const preset of userPresets) {
    const regex = new RegExp(
      `(?:\\b\\s|^)(${preset.command})(?:\\b\\s|$)`,
      "g"
    );
    updatedMessage = updatedMessage.replace(regex, preset.prompt);
  }

  return updatedMessage;
}
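
/**
 * Fetches the most recent chats (up to messageLimit) for a workspace,
 * scoped to the given user, thread, and/or API session, and returns both
 * the raw rows and the prompt-formatted history in chronological order.
 * @param {object} params
 * @param {object|null} params.user
 * @param {object} params.workspace
 * @param {object|null} params.thread
 * @param {number} params.messageLimit
 * @param {string|null} params.apiSessionId
 * @returns {Promise<{rawHistory: object[], chatHistory: object[]}>}
 */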
async function recentChatHistory({
  user = null,
  workspace,
  thread = null,
  messageLimit = 20,
  apiSessionId = null,
}) {
  const rawHistory = (
    await WorkspaceChats.where(
      {
        workspaceId: workspace.id,
        user_id: user?.id || null,
        thread_id: thread?.id || null,
        api_session_id: apiSessionId || null,
        include: true,
      },
      messageLimit,
      { id: "desc" }
    )
  ).reverse();
  return { rawHistory, chatHistory: convertToPromptHistory(rawHistory) };
}
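
/**
 * Returns the system prompt for a workspace, falling back to the default
 * prompt when the workspace has no custom openAiPrompt set.
 * @param {object} workspace
 * @returns {string}
 */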
function chatPrompt(workspace) {
  return (
    workspace?.openAiPrompt ??
    "Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed."
  );
}

// We use this util function to deduplicate sources from similarity searching
// when the document is already pinned.
// Eg: You pin a CSV; if we then RAG + full-text it, the same data points show
// up both from the pinned full-text and possibly from RAG, producing bad
// results even when the LLM was not going to hallucinate.
function sourceIdentifier(sourceDocument) {
  if (!sourceDocument?.title || !sourceDocument?.published) return uuidv4();
  return `title:${sourceDocument.title}-timestamp:${sourceDocument.published}`;
}

module.exports = {
  sourceIdentifier,
  recentChatHistory,
  chatPrompt,
  grepCommand,
  VALID_COMMANDS,
};
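
// Example usage (a minimal sketch, not part of this module): a chat handler
// elsewhere in the server would typically resolve slash commands before
// building the prompt. The `message`, `user`, and `workspace` values below
// are assumed to come from that caller and are not defined in this file.
//
//   const command = await grepCommand(message, user);
//   if (Object.keys(VALID_COMMANDS).includes(command)) {
//     // dispatch to the matching command handler, e.g. resetMemory for "/reset"
//   }
//   const { chatHistory } = await recentChatHistory({ user, workspace });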