2023-06-27 02:54:55 +02:00
|
|
|
const { v4: uuidv4 } = require("uuid");
|
2023-07-25 19:37:04 +02:00
|
|
|
const { reqBody, userFromSession, multiUserMode } = require("../utils/http");
|
2023-06-08 06:31:35 +02:00
|
|
|
const { Workspace } = require("../models/workspace");
|
2023-07-25 19:37:04 +02:00
|
|
|
const { validatedRequest } = require("../utils/middleware/validatedRequest");
|
|
|
|
const { WorkspaceChats } = require("../models/workspaceChats");
|
|
|
|
const { SystemSettings } = require("../models/systemSettings");
|
2023-08-15 02:42:17 +02:00
|
|
|
const { Telemetry } = require("../models/telemetry");
|
2023-11-14 00:07:30 +01:00
|
|
|
const {
|
|
|
|
streamChatWithWorkspace,
|
|
|
|
writeResponseChunk,
|
2024-01-16 19:37:46 +01:00
|
|
|
VALID_CHAT_MODE,
|
2023-11-14 00:07:30 +01:00
|
|
|
} = require("../utils/chats/stream");
|
2024-01-22 23:14:01 +01:00
|
|
|
const {
|
|
|
|
ROLES,
|
|
|
|
flexUserRoleValid,
|
|
|
|
} = require("../utils/middleware/multiUserProtected");
|
[FEAT] Automated audit logging (#667)
* WIP event logging - new table for events and new settings view for viewing
* WIP add logging
* UI for log rows
* rename files to Logging to prevent getting gitignore
* add metadata for all logging events and colored badges in logs page
* remove unneeded comment
* cleanup namespace for logging
* clean up backend calls
* update logging to show to => from settings changes
* add logging for invitations, created, deleted, and accepted
* add logging for user created, updated, suspended, or removed
* add logging for workspace deleted
* add logging for chat logs exported
* add logging for API keys, LLM, embedder, vector db, embed chat, and reset button
* modify event logs
* update to event log types
* simplify rendering of event badges
---------
Co-authored-by: timothycarambat <rambat1010@gmail.com>
2024-02-07 00:21:40 +01:00
|
|
|
const { EventLogs } = require("../models/eventLogs");
|
2024-02-09 03:37:22 +01:00
|
|
|
const {
|
|
|
|
validWorkspaceAndThreadSlug,
|
|
|
|
} = require("../utils/middleware/validWorkspace");
|
2023-06-04 04:28:07 +02:00
|
|
|
|
|
|
|
/**
 * Build the standard "abort" event payload streamed/sent to the client
 * whenever a chat request cannot proceed (validation failure, quota hit,
 * or an unexpected server error).
 * @param {string} error - Human-readable reason for aborting the chat.
 * @returns {object} Abort chunk matching the streaming response shape.
 */
function chatAbortPayload(error) {
  return {
    id: uuidv4(),
    type: "abort",
    textResponse: null,
    sources: [],
    close: true,
    error,
  };
}

/**
 * Apply the headers required for Server-Sent-Event style streaming and
 * flush them immediately so the client can start consuming chunks.
 * @param {import("express").Response} response - Express response object.
 */
function setStreamingHeaders(response) {
  response.setHeader("Cache-Control", "no-cache");
  response.setHeader("Content-Type", "text/event-stream");
  response.setHeader("Access-Control-Allow-Origin", "*");
  response.setHeader("Connection", "keep-alive");
  response.flushHeaders();
}

/**
 * Evaluate the instance-wide 24-hour message quota for a non-admin user.
 * Chat quantity includes all workspaces and threads because any user can
 * freely create threads and would otherwise bypass this rule.
 * @param {object} user - Authenticated user record (must be non-null).
 * @returns {Promise<string|null>} Quota-exceeded error message, or null
 * when the user may continue chatting.
 */
async function dailyMessageLimitError(user) {
  const limitMessagesSetting = await SystemSettings.get({
    label: "limit_user_messages",
  });
  if (limitMessagesSetting?.value !== "true") return null;

  const messageLimitSetting = await SystemSettings.get({
    label: "message_limit",
  });
  const systemLimit = Number(messageLimitSetting?.value);
  // A missing/zero/NaN limit means no cap is enforced.
  if (!systemLimit) return null;

  const currentChatCount = await WorkspaceChats.count({
    user_id: user.id,
    createdAt: {
      // Rolling 24-hour window; Date.now() avoids implicit Date coercion.
      gte: new Date(Date.now() - 24 * 60 * 60 * 1000),
    },
  });

  if (currentChatCount >= systemLimit) {
    return `You have met your maximum 24 hour chat quota of ${systemLimit} chats set by the instance administrators. Try again later.`;
  }
  return null;
}

/**
 * Register the streaming chat endpoints on the Express app:
 *  - POST /workspace/:slug/stream-chat
 *  - POST /workspace/:slug/thread/:threadSlug/stream-chat
 * Both stream responses as Server-Sent Events, enforce the optional
 * per-user daily message quota, and record telemetry + event logs.
 * @param {import("express").Application} app - Express application; no-op when falsy.
 */
function chatEndpoints(app) {
  if (!app) return;

  app.post(
    "/workspace/:slug/stream-chat",
    [validatedRequest, flexUserRoleValid([ROLES.all])],
    async (request, response) => {
      try {
        const user = await userFromSession(request, response);
        const { slug } = request.params;
        const { message, mode = "query" } = reqBody(request);

        const workspace = multiUserMode(response)
          ? await Workspace.getWithUser(user, { slug })
          : await Workspace.get({ slug });

        if (!workspace) {
          // sendStatus() already sends and ends the response; a chained
          // .end() would double-end the stream.
          response.sendStatus(400);
          return;
        }

        if (!message?.length || !VALID_CHAT_MODE.includes(mode)) {
          response
            .status(400)
            .json(
              chatAbortPayload(
                !message?.length
                  ? "Message is empty."
                  : `${mode} is not a valid mode.`
              )
            );
          return;
        }

        setStreamingHeaders(response);

        // Admins are exempt from the daily quota; everyone else in
        // multi-user mode is checked against the instance settings.
        if (multiUserMode(response) && user.role !== ROLES.admin) {
          const limitError = await dailyMessageLimitError(user);
          if (limitError) {
            writeResponseChunk(response, chatAbortPayload(limitError));
            return;
          }
        }

        await streamChatWithWorkspace(response, workspace, message, mode, user);
        await Telemetry.sendTelemetry("sent_chat", {
          multiUserMode: multiUserMode(response),
          LLMSelection: process.env.LLM_PROVIDER || "openai",
          Embedder: process.env.EMBEDDING_ENGINE || "inherit",
          VectorDbSelection: process.env.VECTOR_DB || "pinecone",
        });
        await EventLogs.logEvent(
          "sent_chat",
          {
            workspaceName: workspace?.name,
            chatModel: workspace?.chatModel || "System Default",
          },
          user?.id
        );
        response.end();
      } catch (e) {
        console.error(e);
        writeResponseChunk(response, chatAbortPayload(e.message));
        response.end();
      }
    }
  );

  app.post(
    "/workspace/:slug/thread/:threadSlug/stream-chat",
    [
      validatedRequest,
      flexUserRoleValid([ROLES.all]),
      validWorkspaceAndThreadSlug,
    ],
    async (request, response) => {
      try {
        const user = await userFromSession(request, response);
        const { message, mode = "query" } = reqBody(request);
        // Workspace and thread are resolved by validWorkspaceAndThreadSlug.
        const workspace = response.locals.workspace;
        const thread = response.locals.thread;

        if (!message?.length || !VALID_CHAT_MODE.includes(mode)) {
          response
            .status(400)
            .json(
              chatAbortPayload(
                !message?.length
                  ? "Message is empty."
                  : `${mode} is not a valid mode.`
              )
            );
          return;
        }

        setStreamingHeaders(response);

        if (multiUserMode(response) && user.role !== ROLES.admin) {
          const limitError = await dailyMessageLimitError(user);
          if (limitError) {
            writeResponseChunk(response, chatAbortPayload(limitError));
            return;
          }
        }

        await streamChatWithWorkspace(
          response,
          workspace,
          message,
          mode,
          user,
          thread
        );
        await Telemetry.sendTelemetry("sent_chat", {
          multiUserMode: multiUserMode(response),
          LLMSelection: process.env.LLM_PROVIDER || "openai",
          Embedder: process.env.EMBEDDING_ENGINE || "inherit",
          VectorDbSelection: process.env.VECTOR_DB || "pinecone",
        });
        await EventLogs.logEvent(
          "sent_chat",
          {
            workspaceName: workspace.name,
            thread: thread.name,
            chatModel: workspace?.chatModel || "System Default",
          },
          user?.id
        );
        response.end();
      } catch (e) {
        console.error(e);
        writeResponseChunk(response, chatAbortPayload(e.message));
        response.end();
      }
    }
  );
}
|
|
|
|
|
2023-06-08 06:31:35 +02:00
|
|
|
// Expose the endpoint registrar so the main server bootstrap can mount these routes.
module.exports = { chatEndpoints };
|