Mirror of https://github.com/Mintplex-Labs/anything-llm.git, synced 2024-11-14 10:30:10 +01:00
51dbff0dcb
Remove useless icons in prompt bar
Add chatMode column to workspaces that defaults to chat
Add UI for toggle of chat mode with hint
Update UI for workspace settings to match designs
233 lines
7.4 KiB
JavaScript
const { v4: uuidv4 } = require("uuid");
const { reqBody, userFromSession, multiUserMode } = require("../utils/http");
const { validatedRequest } = require("../utils/middleware/validatedRequest");
const { WorkspaceChats } = require("../models/workspaceChats");
const { SystemSettings } = require("../models/systemSettings");
const { Telemetry } = require("../models/telemetry");
const { streamChatWithWorkspace } = require("../utils/chats/stream");
const {
  ROLES,
  flexUserRoleValid,
} = require("../utils/middleware/multiUserProtected");
const { EventLogs } = require("../models/eventLogs");
const {
  validWorkspaceAndThreadSlug,
  validWorkspaceSlug,
} = require("../utils/middleware/validWorkspace");
const { writeResponseChunk } = require("../utils/helpers/chat/responses");
function chatEndpoints(app) {
  if (!app) return;

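  // POST /workspace/:slug/stream-chat
  // Streams a chat response for the workspace back to the caller as server-sent events.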
  app.post(
    "/workspace/:slug/stream-chat",
    [validatedRequest, flexUserRoleValid([ROLES.all]), validWorkspaceSlug],
    async (request, response) => {
      try {
        const user = await userFromSession(request, response);
        const { message } = reqBody(request);
        const workspace = response.locals.workspace;

        if (!message?.length) {
          response.status(400).json({
            id: uuidv4(),
            type: "abort",
            textResponse: null,
            sources: [],
            close: true,
            error: !message?.length ? "Message is empty." : null,
          });
          return;
        }

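        // Switch the response into a server-sent events stream; chunks are flushed as they are written.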
response.setHeader("Cache-Control", "no-cache");
|
|
response.setHeader("Content-Type", "text/event-stream");
|
|
response.setHeader("Access-Control-Allow-Origin", "*");
|
|
response.setHeader("Connection", "keep-alive");
|
|
response.flushHeaders();
|
|
|
|
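        // In multi-user mode, non-admin users can be limited to a rolling 24-hour message quota.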
        if (multiUserMode(response) && user.role !== ROLES.admin) {
          const limitMessagesSetting = await SystemSettings.get({
            label: "limit_user_messages",
          });
          const limitMessages = limitMessagesSetting?.value === "true";

          if (limitMessages) {
            const messageLimitSetting = await SystemSettings.get({
              label: "message_limit",
            });
            const systemLimit = Number(messageLimitSetting?.value);

            if (!!systemLimit) {
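              // Count this user's chats from the last 24 hours across all workspaces.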
              const currentChatCount = await WorkspaceChats.count({
                user_id: user.id,
                createdAt: {
                  gte: new Date(new Date() - 24 * 60 * 60 * 1000),
                },
              });

              if (currentChatCount >= systemLimit) {
                writeResponseChunk(response, {
                  id: uuidv4(),
                  type: "abort",
                  textResponse: null,
                  sources: [],
                  close: true,
                  error: `You have met your maximum 24 hour chat quota of ${systemLimit} chats set by the instance administrators. Try again later.`,
                });
                return;
              }
            }
          }
        }

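        // Stream the LLM response back over the open SSE connection using the workspace's configured chat mode.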
        await streamChatWithWorkspace(
          response,
          workspace,
          message,
          workspace?.chatMode,
          user
        );
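        // Record telemetry and an event log entry for the chat that was just sent.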
        await Telemetry.sendTelemetry("sent_chat", {
          multiUserMode: multiUserMode(response),
          LLMSelection: process.env.LLM_PROVIDER || "openai",
          Embedder: process.env.EMBEDDING_ENGINE || "inherit",
          VectorDbSelection: process.env.VECTOR_DB || "pinecone",
        });

        await EventLogs.logEvent(
          "sent_chat",
          {
            workspaceName: workspace?.name,
            chatModel: workspace?.chatModel || "System Default",
          },
          user?.id
        );
        response.end();
      } catch (e) {
        console.error(e);
        writeResponseChunk(response, {
          id: uuidv4(),
          type: "abort",
          textResponse: null,
          sources: [],
          close: true,
          error: e.message,
        });
        response.end();
      }
    }
  );

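  // Thread-scoped variant of the route above: same validation, quota, and streaming flow,
  // but the chat is tied to a specific thread within the workspace.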
  app.post(
    "/workspace/:slug/thread/:threadSlug/stream-chat",
    [
      validatedRequest,
      flexUserRoleValid([ROLES.all]),
      validWorkspaceAndThreadSlug,
    ],
    async (request, response) => {
      try {
        const user = await userFromSession(request, response);
        const { message } = reqBody(request);
        const workspace = response.locals.workspace;
        const thread = response.locals.thread;

        if (!message?.length) {
          response.status(400).json({
            id: uuidv4(),
            type: "abort",
            textResponse: null,
            sources: [],
            close: true,
            error: !message?.length ? "Message is empty." : null,
          });
          return;
        }

        response.setHeader("Cache-Control", "no-cache");
        response.setHeader("Content-Type", "text/event-stream");
        response.setHeader("Access-Control-Allow-Origin", "*");
        response.setHeader("Connection", "keep-alive");
        response.flushHeaders();

        if (multiUserMode(response) && user.role !== ROLES.admin) {
          const limitMessagesSetting = await SystemSettings.get({
            label: "limit_user_messages",
          });
          const limitMessages = limitMessagesSetting?.value === "true";

          if (limitMessages) {
            const messageLimitSetting = await SystemSettings.get({
              label: "message_limit",
            });
            const systemLimit = Number(messageLimitSetting?.value);

            if (!!systemLimit) {
              // Chat qty includes all threads because any user can freely
              // create threads and would bypass this rule.
              const currentChatCount = await WorkspaceChats.count({
                user_id: user.id,
                createdAt: {
                  gte: new Date(new Date() - 24 * 60 * 60 * 1000),
                },
              });

              if (currentChatCount >= systemLimit) {
                writeResponseChunk(response, {
                  id: uuidv4(),
                  type: "abort",
                  textResponse: null,
                  sources: [],
                  close: true,
                  error: `You have met your maximum 24 hour chat quota of ${systemLimit} chats set by the instance administrators. Try again later.`,
                });
                return;
              }
            }
          }
        }

        await streamChatWithWorkspace(
          response,
          workspace,
          message,
          workspace?.chatMode,
          user,
          thread
        );
        await Telemetry.sendTelemetry("sent_chat", {
          multiUserMode: multiUserMode(response),
          LLMSelection: process.env.LLM_PROVIDER || "openai",
          Embedder: process.env.EMBEDDING_ENGINE || "inherit",
          VectorDbSelection: process.env.VECTOR_DB || "pinecone",
        });

        await EventLogs.logEvent(
          "sent_chat",
          {
            workspaceName: workspace.name,
            thread: thread.name,
            chatModel: workspace?.chatModel || "System Default",
          },
          user?.id
        );
        response.end();
      } catch (e) {
        console.error(e);
        writeResponseChunk(response, {
          id: uuidv4(),
          type: "abort",
          textResponse: null,
          sources: [],
          close: true,
          error: e.message,
        });
        response.end();
      }
    }
  );
}

module.exports = { chatEndpoints };
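These routes go live once the application's Express instance is passed to chatEndpoints(app). For reference, a minimal consumption sketch only: the base URL, the Bearer auth header, and the `data: {...}` SSE line framing produced by writeResponseChunk are assumptions, not taken from this file.

// Illustrative client sketch (Node 18+ fetch); not part of the project source.
// Chunk fields (type, textResponse, close, error) mirror the abort chunks written above;
// other chunk shapes come from streamChatWithWorkspace and are not visible in this file.
async function streamChat(baseUrl, slug, message, apiKey) {
  const res = await fetch(`${baseUrl}/workspace/${slug}/stream-chat`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`, // assumed; validatedRequest determines the real auth scheme
    },
    body: JSON.stringify({ message }),
  });
  if (!res.ok) throw new Error(`Request failed with status ${res.status}`);

  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split("\n");
    buffer = lines.pop(); // keep a possibly partial line for the next read
    for (const line of lines) {
      if (!line.startsWith("data: ")) continue;
      const chunk = JSON.parse(line.slice("data: ".length));
      if (chunk.type === "abort") throw new Error(chunk.error);
      if (chunk.textResponse) process.stdout.write(chunk.textResponse);
      if (chunk.close) return;
    }
  }
}

The thread-scoped route is consumed the same way at /workspace/:slug/thread/:threadSlug/stream-chat.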