diff --git a/frontend/src/pages/WorkspaceSettings/GeneralAppearance/WorkspacePfp/index.jsx b/frontend/src/pages/WorkspaceSettings/GeneralAppearance/WorkspacePfp/index.jsx
new file mode 100644
index 00000000..e9fb8303
--- /dev/null
+++ b/frontend/src/pages/WorkspaceSettings/GeneralAppearance/WorkspacePfp/index.jsx
@@ -0,0 +1,96 @@
+import Workspace from "@/models/workspace";
+import showToast from "@/utils/toast";
+import { Plus } from "@phosphor-icons/react";
+import { useEffect, useState } from "react";
+
+export default function WorkspacePfp({ workspace, slug }) {
+ const [pfp, setPfp] = useState(null);
+
+ useEffect(() => {
+ async function fetchWorkspace() {
+ const pfpUrl = await Workspace.fetchPfp(slug);
+ setPfp(pfpUrl);
+ }
+ fetchWorkspace();
+ }, [slug]);
+
+ const handleFileUpload = async (event) => {
+ const file = event.target.files[0];
+ if (!file) return false;
+
+ const formData = new FormData();
+ formData.append("file", file);
+ const { success, error } = await Workspace.uploadPfp(
+ formData,
+ workspace.slug
+ );
+ if (!success) {
+ showToast(`Failed to upload profile picture: ${error}`, "error");
+ return;
+ }
+
+ const pfpUrl = await Workspace.fetchPfp(workspace.slug);
+ setPfp(pfpUrl);
+ showToast("Profile picture uploaded.", "success");
+ };
+
+ const handleRemovePfp = async () => {
+ const { success, error } = await Workspace.removePfp(workspace.slug);
+ if (!success) {
+ showToast(`Failed to remove profile picture: ${error}`, "error");
+ return;
+ }
+
+ setPfp(null);
+ };
+
+ return (
+ <div className="mt-6">
+ <div className="flex flex-col gap-y-1">
+ <p className="text-sm font-base text-white/60">
+ Customize the profile image of the assistant for this workspace.
+ </p>
+ </div>
+ <div className="flex flex-col items-center gap-y-2 mt-2">
+ <label className="w-48 h-48 flex items-center justify-center rounded-full cursor-pointer">
+ <input
+ type="file"
+ accept="image/*"
+ className="hidden"
+ onChange={handleFileUpload}
+ />
+ {pfp ? (
+ <img src={pfp} alt="Workspace profile picture" className="w-full h-full rounded-full object-cover" />
+ ) : (
+ <Plus size={30} color="#ffffff" weight="bold" />
+ )}
+ </label>
+ {pfp && (
+ <button type="button" onClick={handleRemovePfp} className="mt-3 text-white text-sm hover:underline">
+ Remove Profile Picture
+ </button>
+ )}
+ </div>
+ </div>
+ );
+}
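
The component above calls Workspace.fetchPfp, Workspace.uploadPfp, and Workspace.removePfp, which are not part of this diff. A minimal sketch of what those frontend model helpers might look like, assuming the API_BASE and baseHeaders utilities already used elsewhere in the frontend; the paths mirror the new server routes, but everything else is illustrative only:

// Hypothetical additions to frontend/src/models/workspace.js -- not included in
// this diff; API_BASE and baseHeaders are assumed from the existing codebase.
import { API_BASE } from "@/utils/constants";
import { baseHeaders } from "@/utils/request";

const workspacePfpMethods = {
  // Returns an object URL for the workspace image, or null when none is set (204).
  fetchPfp: async function (slug) {
    return await fetch(`${API_BASE}/workspace/${slug}/pfp`, {
      method: "GET",
      cache: "no-cache",
      headers: baseHeaders(),
    })
      .then((res) => (res.ok && res.status !== 204 ? res.blob() : null))
      .then((blob) => (blob ? URL.createObjectURL(blob) : null))
      .catch(() => null);
  },

  // Uploads a FormData payload containing the image under the "file" key.
  uploadPfp: async function (formData, slug) {
    return await fetch(`${API_BASE}/workspace/${slug}/upload-pfp`, {
      method: "POST",
      body: formData,
      headers: baseHeaders(),
    })
      .then((res) => {
        if (!res.ok) throw new Error("Error uploading image.");
        return { success: true, error: null };
      })
      .catch((e) => ({ success: false, error: e.message }));
  },

  // Deletes the stored image for the workspace.
  removePfp: async function (slug) {
    return await fetch(`${API_BASE}/workspace/${slug}/remove-pfp`, {
      method: "DELETE",
      headers: baseHeaders(),
    })
      .then((res) => {
        if (!res.ok) throw new Error("Error removing image.");
        return { success: true, error: null };
      })
      .catch((e) => ({ success: false, error: e.message }));
  },
};
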
diff --git a/frontend/src/pages/WorkspaceSettings/GeneralAppearance/index.jsx b/frontend/src/pages/WorkspaceSettings/GeneralAppearance/index.jsx
index ee00143e..b6d5b84a 100644
--- a/frontend/src/pages/WorkspaceSettings/GeneralAppearance/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/GeneralAppearance/index.jsx
@@ -6,6 +6,7 @@ import VectorCount from "./VectorCount";
import WorkspaceName from "./WorkspaceName";
import SuggestedChatMessages from "./SuggestedChatMessages";
import DeleteWorkspace from "./DeleteWorkspace";
+import WorkspacePfp from "./WorkspacePfp";
export default function GeneralInfo({ slug }) {
const [workspace, setWorkspace] = useState(null);
@@ -66,9 +67,8 @@ export default function GeneralInfo({ slug }) {
)}
-
- <SuggestedChatMessages slug={workspace.slug} />
-
+ <WorkspacePfp workspace={workspace} slug={slug} />
+ <SuggestedChatMessages slug={workspace.slug} />
</>
);
diff --git a/frontend/src/utils/chat/index.js b/frontend/src/utils/chat/index.js
index f1df11fe..37237c9e 100644
--- a/frontend/src/utils/chat/index.js
+++ b/frontend/src/utils/chat/index.js
@@ -1,3 +1,5 @@
+export const ABORT_STREAM_EVENT = "abort-chat-stream";
+
// For handling of chat responses in the frontend by their various types.
export default function handleChat(
chatResult,
@@ -108,6 +110,22 @@ export default function handleChat(
_chatHistory[chatIdx] = updatedHistory;
}
setChatHistory([..._chatHistory]);
+ setLoadingResponse(false);
+ } else if (type === "stopGeneration") {
+ const chatIdx = _chatHistory.length - 1;
+ const existingHistory = { ..._chatHistory[chatIdx] };
+ const updatedHistory = {
+ ...existingHistory,
+ sources: [],
+ closed: true,
+ error: null,
+ animate: false,
+ pending: false,
+ };
+ _chatHistory[chatIdx] = updatedHistory;
+
+ setChatHistory([..._chatHistory]);
+ setLoadingResponse(false);
}
}
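
ABORT_STREAM_EVENT is exported above, but the code that dispatches and listens for it is outside this section. A hedged sketch of the intended wiring, assuming the streaming request is made with fetch plus an AbortController; the function and names below are illustrative, not the repository's actual implementation:

// Hypothetical wiring for ABORT_STREAM_EVENT.
import { ABORT_STREAM_EVENT } from "@/utils/chat";

// Abort the in-flight request when the user clicks "stop generating"; the
// server's "close" listener then finalizes the chat with the partial text.
export function streamWithAbort(url, options = {}) {
  const ctrl = new AbortController();
  const handleAbort = () => ctrl.abort();
  window.addEventListener(ABORT_STREAM_EVENT, handleAbort, { once: true });

  return fetch(url, { ...options, signal: ctrl.signal }).finally(() =>
    window.removeEventListener(ABORT_STREAM_EVENT, handleAbort)
  );
}

// In the stop-generation button handler:
// window.dispatchEvent(new CustomEvent(ABORT_STREAM_EVENT));
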
diff --git a/server/endpoints/system.js b/server/endpoints/system.js
index a36777c8..74b83688 100644
--- a/server/endpoints/system.js
+++ b/server/endpoints/system.js
@@ -548,8 +548,6 @@ function systemEndpoints(app) {
const userRecord = await User.get({ id: user.id });
const oldPfpFilename = userRecord.pfpFilename;
-
- console.log("oldPfpFilename", oldPfpFilename);
if (oldPfpFilename) {
const oldPfpPath = path.join(
__dirname,
diff --git a/server/endpoints/workspaces.js b/server/endpoints/workspaces.js
index 54228bba..2fe63e58 100644
--- a/server/endpoints/workspaces.js
+++ b/server/endpoints/workspaces.js
@@ -19,10 +19,21 @@ const { validWorkspaceSlug } = require("../utils/middleware/validWorkspace");
const { convertToChatHistory } = require("../utils/helpers/chat/responses");
const { CollectorApi } = require("../utils/collectorApi");
const { handleUploads } = setupMulter();
+const { setupPfpUploads } = require("../utils/files/multer");
+const { normalizePath } = require("../utils/files");
+const { handlePfpUploads } = setupPfpUploads();
+const path = require("path");
+const fs = require("fs");
+const {
+ determineWorkspacePfpFilepath,
+ fetchPfp,
+} = require("../utils/files/pfp");
function workspaceEndpoints(app) {
if (!app) return;
+ const responseCache = new Map();
+
app.post(
"/workspace/new",
[validatedRequest, flexUserRoleValid([ROLES.admin, ROLES.manager])],
@@ -422,6 +433,138 @@ function workspaceEndpoints(app) {
}
}
);
+
+ app.get(
+ "/workspace/:slug/pfp",
+ [validatedRequest, flexUserRoleValid([ROLES.all])],
+ async function (request, response) {
+ try {
+ const { slug } = request.params;
+ const cachedResponse = responseCache.get(slug);
+
+ if (cachedResponse) {
+ response.writeHead(200, {
+ "Content-Type": cachedResponse.mime || "image/png",
+ });
+ response.end(cachedResponse.buffer);
+ return;
+ }
+
+ const pfpPath = await determineWorkspacePfpFilepath(slug);
+
+ if (!pfpPath) {
+ response.sendStatus(204).end();
+ return;
+ }
+
+ const { found, buffer, mime } = fetchPfp(pfpPath);
+ if (!found) {
+ response.sendStatus(204).end();
+ return;
+ }
+
+ responseCache.set(slug, { buffer, mime });
+
+ response.writeHead(200, {
+ "Content-Type": mime || "image/png",
+ });
+ response.end(buffer);
+ return;
+ } catch (error) {
+ console.error("Error processing the logo request:", error);
+ response.status(500).json({ message: "Internal server error" });
+ }
+ }
+ );
+
+ app.post(
+ "/workspace/:slug/upload-pfp",
+ [validatedRequest, flexUserRoleValid([ROLES.admin, ROLES.manager])],
+ handlePfpUploads.single("file"),
+ async function (request, response) {
+ try {
+ const { slug } = request.params;
+ const uploadedFileName = request.randomFileName;
+ if (!uploadedFileName) {
+ return response.status(400).json({ message: "File upload failed." });
+ }
+
+ const workspaceRecord = await Workspace.get({
+ slug,
+ });
+
+ const oldPfpFilename = workspaceRecord.pfpFilename;
+ if (oldPfpFilename) {
+ const oldPfpPath = path.join(
+ __dirname,
+ `../storage/assets/pfp/${normalizePath(
+ workspaceRecord.pfpFilename
+ )}`
+ );
+
+ if (fs.existsSync(oldPfpPath)) fs.unlinkSync(oldPfpPath);
+ }
+
+ const { workspace, message } = await Workspace.update(
+ workspaceRecord.id,
+ {
+ pfpFilename: uploadedFileName,
+ }
+ );
+
+ // Clear any cached image for this slug so the new upload is served on the next fetch.
+ responseCache.delete(slug);
+
+ return response.status(workspace ? 200 : 500).json({
+ message: workspace
+ ? "Profile picture uploaded successfully."
+ : message,
+ });
+ } catch (error) {
+ console.error("Error processing the profile picture upload:", error);
+ response.status(500).json({ message: "Internal server error" });
+ }
+ }
+ );
+
+ app.delete(
+ "/workspace/:slug/remove-pfp",
+ [validatedRequest, flexUserRoleValid([ROLES.admin, ROLES.manager])],
+ async function (request, response) {
+ try {
+ const { slug } = request.params;
+ const workspaceRecord = await Workspace.get({
+ slug,
+ });
+ const oldPfpFilename = workspaceRecord.pfpFilename;
+
+ if (oldPfpFilename) {
+ const oldPfpPath = path.join(
+ __dirname,
+ `../storage/assets/pfp/${normalizePath(oldPfpFilename)}`
+ );
+
+ if (fs.existsSync(oldPfpPath)) fs.unlinkSync(oldPfpPath);
+ }
+
+ const { workspace, message } = await Workspace.update(
+ workspaceRecord.id,
+ {
+ pfpFilename: null,
+ }
+ );
+
+ // Clear the cache
+ responseCache.delete(slug);
+
+ return response.status(workspace ? 200 : 500).json({
+ message: workspace
+ ? "Profile picture removed successfully."
+ : message,
+ });
+ } catch (error) {
+ console.error("Error processing the profile picture removal:", error);
+ response.status(500).json({ message: "Internal server error" });
+ }
+ }
+ );
}
module.exports = { workspaceEndpoints };
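
The upload route relies on handlePfpUploads from setupPfpUploads() in server/utils/files/multer, which is not included in this diff; the route only assumes the middleware writes the file into storage/assets/pfp and exposes the stored name as request.randomFileName. A minimal sketch of such a setup, illustrative only and not the repository's actual implementation. Note also that the module-level responseCache keeps one decoded image buffer per slug, so it must be cleared whenever the stored image changes.

// Hypothetical sketch of setupPfpUploads() -- only the destination folder and
// the request.randomFileName contract are inferred from the routes above.
const multer = require("multer");
const path = require("path");
const fs = require("fs");
const { v4 } = require("uuid");

function setupPfpUploads() {
  const storage = multer.diskStorage({
    destination: function (_req, _file, cb) {
      const uploadOutput = process.env.STORAGE_DIR
        ? path.resolve(process.env.STORAGE_DIR, "assets/pfp")
        : path.resolve(__dirname, "../../storage/assets/pfp");
      fs.mkdirSync(uploadOutput, { recursive: true });
      cb(null, uploadOutput);
    },
    filename: function (req, file, cb) {
      // Randomize the stored name and expose it so the route can persist it.
      const randomFileName = `${v4()}${path.extname(file.originalname)}`;
      req.randomFileName = randomFileName;
      cb(null, randomFileName);
    },
  });
  return { handlePfpUploads: multer({ storage }) };
}

module.exports = { setupPfpUploads };
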
diff --git a/server/models/workspace.js b/server/models/workspace.js
index 92c2f9e3..48952c63 100644
--- a/server/models/workspace.js
+++ b/server/models/workspace.js
@@ -19,6 +19,7 @@ const Workspace = {
"chatModel",
"topN",
"chatMode",
+ "pfpFilename",
],
new: async function (name = null, creatorId = null) {
diff --git a/server/package.json b/server/package.json
index e521d4b6..c5654c36 100644
--- a/server/package.json
+++ b/server/package.json
@@ -26,7 +26,7 @@
"@google/generative-ai": "^0.1.3",
"@googleapis/youtube": "^9.0.0",
"@pinecone-database/pinecone": "^2.0.1",
- "@prisma/client": "5.3.0",
+ "@prisma/client": "5.3.1",
"@qdrant/js-client-rest": "^1.4.0",
"@xenova/transformers": "^2.14.0",
"@zilliz/milvus2-sdk-node": "^2.3.5",
@@ -52,7 +52,7 @@
"openai": "^3.2.1",
"pinecone-client": "^1.1.0",
"posthog-node": "^3.1.1",
- "prisma": "^5.3.1",
+ "prisma": "5.3.1",
"slugify": "^1.6.6",
"sqlite": "^4.2.1",
"sqlite3": "^5.1.6",
@@ -78,4 +78,4 @@
"nodemon": "^2.0.22",
"prettier": "^3.0.3"
}
-}
+}
\ No newline at end of file
diff --git a/server/prisma/migrations/20240301002308_init/migration.sql b/server/prisma/migrations/20240301002308_init/migration.sql
new file mode 100644
index 00000000..5847beaf
--- /dev/null
+++ b/server/prisma/migrations/20240301002308_init/migration.sql
@@ -0,0 +1,2 @@
+-- AlterTable
+ALTER TABLE "workspaces" ADD COLUMN "pfpFilename" TEXT;
diff --git a/server/prisma/schema.prisma b/server/prisma/schema.prisma
index 8cd3a1d3..e6121e29 100644
--- a/server/prisma/schema.prisma
+++ b/server/prisma/schema.prisma
@@ -100,6 +100,7 @@ model workspaces {
chatModel String?
topN Int? @default(4)
chatMode String? @default("chat")
+ pfpFilename String?
workspace_users workspace_users[]
documents workspace_documents[]
workspace_suggested_messages workspace_suggested_messages[]
diff --git a/server/utils/AiProviders/anthropic/index.js b/server/utils/AiProviders/anthropic/index.js
index a48058e8..fea08332 100644
--- a/server/utils/AiProviders/anthropic/index.js
+++ b/server/utils/AiProviders/anthropic/index.js
@@ -1,6 +1,9 @@
const { v4 } = require("uuid");
const { chatPrompt } = require("../../chats");
-const { writeResponseChunk } = require("../../helpers/chat/responses");
+const {
+ writeResponseChunk,
+ clientAbortedHandler,
+} = require("../../helpers/chat/responses");
class AnthropicLLM {
constructor(embedder = null, modelPreference = null) {
if (!process.env.ANTHROPIC_API_KEY)
@@ -150,6 +153,13 @@ class AnthropicLLM {
let fullText = "";
const { uuid = v4(), sources = [] } = responseProps;
+ // Establish listener to early-abort a streaming response
+ // in case things go sideways or the user does not like the response.
+ // We preserve the generated text but continue as if chat was completed
+ // to preserve previously generated content.
+ const handleAbort = () => clientAbortedHandler(resolve, fullText);
+ response.on("close", handleAbort);
+
stream.on("streamEvent", (message) => {
const data = message;
if (
@@ -181,6 +191,7 @@ class AnthropicLLM {
close: true,
error: false,
});
+ response.removeListener("close", handleAbort);
resolve(fullText);
}
});
diff --git a/server/utils/AiProviders/azureOpenAi/index.js b/server/utils/AiProviders/azureOpenAi/index.js
index 2ac6de3a..21fc5cd9 100644
--- a/server/utils/AiProviders/azureOpenAi/index.js
+++ b/server/utils/AiProviders/azureOpenAi/index.js
@@ -1,6 +1,9 @@
const { AzureOpenAiEmbedder } = require("../../EmbeddingEngines/azureOpenAi");
const { chatPrompt } = require("../../chats");
-const { writeResponseChunk } = require("../../helpers/chat/responses");
+const {
+ writeResponseChunk,
+ clientAbortedHandler,
+} = require("../../helpers/chat/responses");
class AzureOpenAiLLM {
constructor(embedder = null, _modelPreference = null) {
@@ -174,6 +177,14 @@ class AzureOpenAiLLM {
return new Promise(async (resolve) => {
let fullText = "";
+
+ // Establish listener to early-abort a streaming response
+ // in case things go sideways or the user does not like the response.
+ // We preserve the generated text but continue as if chat was completed
+ // to preserve previously generated content.
+ const handleAbort = () => clientAbortedHandler(resolve, fullText);
+ response.on("close", handleAbort);
+
for await (const event of stream) {
for (const choice of event.choices) {
const delta = choice.delta?.content;
@@ -198,6 +209,7 @@ class AzureOpenAiLLM {
close: true,
error: false,
});
+ response.removeListener("close", handleAbort);
resolve(fullText);
});
}
diff --git a/server/utils/AiProviders/gemini/index.js b/server/utils/AiProviders/gemini/index.js
index bd84a385..3d334b29 100644
--- a/server/utils/AiProviders/gemini/index.js
+++ b/server/utils/AiProviders/gemini/index.js
@@ -1,5 +1,8 @@
const { chatPrompt } = require("../../chats");
-const { writeResponseChunk } = require("../../helpers/chat/responses");
+const {
+ writeResponseChunk,
+ clientAbortedHandler,
+} = require("../../helpers/chat/responses");
class GeminiLLM {
constructor(embedder = null, modelPreference = null) {
@@ -198,6 +201,14 @@ class GeminiLLM {
return new Promise(async (resolve) => {
let fullText = "";
+
+ // Establish listener to early-abort a streaming response
+ // in case things go sideways or the user does not like the response.
+ // We preserve the generated text but continue as if chat was completed
+ // to preserve previously generated content.
+ const handleAbort = () => clientAbortedHandler(resolve, fullText);
+ response.on("close", handleAbort);
+
for await (const chunk of stream) {
fullText += chunk.text();
writeResponseChunk(response, {
@@ -218,6 +229,7 @@ class GeminiLLM {
close: true,
error: false,
});
+ response.removeListener("close", handleAbort);
resolve(fullText);
});
}
diff --git a/server/utils/AiProviders/huggingface/index.js b/server/utils/AiProviders/huggingface/index.js
index 416e622a..751d3595 100644
--- a/server/utils/AiProviders/huggingface/index.js
+++ b/server/utils/AiProviders/huggingface/index.js
@@ -1,7 +1,10 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
const { chatPrompt } = require("../../chats");
-const { writeResponseChunk } = require("../../helpers/chat/responses");
+const {
+ writeResponseChunk,
+ clientAbortedHandler,
+} = require("../../helpers/chat/responses");
class HuggingFaceLLM {
constructor(embedder = null, _modelPreference = null) {
@@ -172,6 +175,14 @@ class HuggingFaceLLM {
return new Promise((resolve) => {
let fullText = "";
let chunk = "";
+
+ // Establish listener to early-abort a streaming response
+ // in case things go sideways or the user does not like the response.
+ // We preserve the generated text but continue as if chat was completed
+ // to preserve previously generated content.
+ const handleAbort = () => clientAbortedHandler(resolve, fullText);
+ response.on("close", handleAbort);
+
stream.data.on("data", (data) => {
const lines = data
?.toString()
@@ -218,6 +229,7 @@ class HuggingFaceLLM {
close: true,
error: false,
});
+ response.removeListener("close", handleAbort);
resolve(fullText);
} else {
let error = null;
@@ -241,6 +253,7 @@ class HuggingFaceLLM {
close: true,
error,
});
+ response.removeListener("close", handleAbort);
resolve("");
return;
}
@@ -266,6 +279,7 @@ class HuggingFaceLLM {
close: true,
error: false,
});
+ response.removeListener("close", handleAbort);
resolve(fullText);
}
}
diff --git a/server/utils/AiProviders/native/index.js b/server/utils/AiProviders/native/index.js
index 157fb752..5764d4ee 100644
--- a/server/utils/AiProviders/native/index.js
+++ b/server/utils/AiProviders/native/index.js
@@ -2,7 +2,10 @@ const fs = require("fs");
const path = require("path");
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { chatPrompt } = require("../../chats");
-const { writeResponseChunk } = require("../../helpers/chat/responses");
+const {
+ writeResponseChunk,
+ clientAbortedHandler,
+} = require("../../helpers/chat/responses");
// Docs: https://api.js.langchain.com/classes/chat_models_llama_cpp.ChatLlamaCpp.html
const ChatLlamaCpp = (...args) =>
@@ -176,6 +179,14 @@ class NativeLLM {
return new Promise(async (resolve) => {
let fullText = "";
+
+ // Establish listener to early-abort a streaming response
+ // in case things go sideways or the user does not like the response.
+ // We preserve the generated text but continue as if chat was completed
+ // to preserve previously generated content.
+ const handleAbort = () => clientAbortedHandler(resolve, fullText);
+ response.on("close", handleAbort);
+
for await (const chunk of stream) {
if (chunk === undefined)
throw new Error(
@@ -202,6 +213,7 @@ class NativeLLM {
close: true,
error: false,
});
+ response.removeListener("close", handleAbort);
resolve(fullText);
});
}
diff --git a/server/utils/AiProviders/ollama/index.js b/server/utils/AiProviders/ollama/index.js
index 035d4a9d..6bd857b4 100644
--- a/server/utils/AiProviders/ollama/index.js
+++ b/server/utils/AiProviders/ollama/index.js
@@ -1,6 +1,9 @@
const { chatPrompt } = require("../../chats");
const { StringOutputParser } = require("langchain/schema/output_parser");
-const { writeResponseChunk } = require("../../helpers/chat/responses");
+const {
+ writeResponseChunk,
+ clientAbortedHandler,
+} = require("../../helpers/chat/responses");
// Docs: https://github.com/jmorganca/ollama/blob/main/docs/api.md
class OllamaAILLM {
@@ -180,8 +183,16 @@ class OllamaAILLM {
const { uuid = uuidv4(), sources = [] } = responseProps;
return new Promise(async (resolve) => {
+ let fullText = "";
+
+ // Establish listener to early-abort a streaming response
+ // in case things go sideways or the user does not like the response.
+ // We preserve the generated text but continue as if chat was completed
+ // to preserve previously generated content.
+ const handleAbort = () => clientAbortedHandler(resolve, fullText);
+ response.on("close", handleAbort);
+
try {
- let fullText = "";
for await (const chunk of stream) {
if (chunk === undefined)
throw new Error(
@@ -210,6 +221,7 @@ class OllamaAILLM {
close: true,
error: false,
});
+ response.removeListener("close", handleAbort);
resolve(fullText);
} catch (error) {
writeResponseChunk(response, {
@@ -222,6 +234,7 @@ class OllamaAILLM {
error?.cause ?? error.message
}`,
});
+ response.removeListener("close", handleAbort);
}
});
}
diff --git a/server/utils/AiProviders/openRouter/index.js b/server/utils/AiProviders/openRouter/index.js
index 38a6f9f0..a1f606f6 100644
--- a/server/utils/AiProviders/openRouter/index.js
+++ b/server/utils/AiProviders/openRouter/index.js
@@ -1,7 +1,10 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { chatPrompt } = require("../../chats");
const { v4: uuidv4 } = require("uuid");
-const { writeResponseChunk } = require("../../helpers/chat/responses");
+const {
+ writeResponseChunk,
+ clientAbortedHandler,
+} = require("../../helpers/chat/responses");
function openRouterModels() {
const { MODELS } = require("./models.js");
@@ -195,6 +198,13 @@ class OpenRouterLLM {
let chunk = "";
let lastChunkTime = null; // null when first token is still not received.
+ // Establish listener to early-abort a streaming response
+ // in case things go sideways or the user does not like the response.
+ // We preserve the generated text but continue as if chat was completed
+ // to preserve previously generated content.
+ const handleAbort = () => clientAbortedHandler(resolve, fullText);
+ response.on("close", handleAbort);
+
// NOTICE: Not all OpenRouter models will return a stop reason
// which keeps the connection open and so the model never finalizes the stream
// like the traditional OpenAI response schema does. So in the case the response stream
@@ -220,6 +230,7 @@ class OpenRouterLLM {
error: false,
});
clearInterval(timeoutCheck);
+ response.removeListener("close", handleAbort);
resolve(fullText);
}
}, 500);
@@ -269,6 +280,7 @@ class OpenRouterLLM {
error: false,
});
clearInterval(timeoutCheck);
+ response.removeListener("close", handleAbort);
resolve(fullText);
} else {
let finishReason = null;
@@ -305,6 +317,7 @@ class OpenRouterLLM {
error: false,
});
clearInterval(timeoutCheck);
+ response.removeListener("close", handleAbort);
resolve(fullText);
}
}
diff --git a/server/utils/AiProviders/togetherAi/index.js b/server/utils/AiProviders/togetherAi/index.js
index 15b254a1..def03df9 100644
--- a/server/utils/AiProviders/togetherAi/index.js
+++ b/server/utils/AiProviders/togetherAi/index.js
@@ -1,5 +1,8 @@
const { chatPrompt } = require("../../chats");
-const { writeResponseChunk } = require("../../helpers/chat/responses");
+const {
+ writeResponseChunk,
+ clientAbortedHandler,
+} = require("../../helpers/chat/responses");
function togetherAiModels() {
const { MODELS } = require("./models.js");
@@ -185,6 +188,14 @@ class TogetherAiLLM {
return new Promise((resolve) => {
let fullText = "";
let chunk = "";
+
+ // Establish listener to early-abort a streaming response
+ // in case things go sideways or the user does not like the response.
+ // We preserve the generated text but continue as if chat was completed
+ // to preserve previously generated content.
+ const handleAbort = () => clientAbortedHandler(resolve, fullText);
+ response.on("close", handleAbort);
+
stream.data.on("data", (data) => {
const lines = data
?.toString()
@@ -230,6 +241,7 @@ class TogetherAiLLM {
close: true,
error: false,
});
+ response.removeListener("close", handleAbort);
resolve(fullText);
} else {
let finishReason = null;
@@ -263,6 +275,7 @@ class TogetherAiLLM {
close: true,
error: false,
});
+ response.removeListener("close", handleAbort);
resolve(fullText);
}
}
diff --git a/server/utils/files/pfp.js b/server/utils/files/pfp.js
index dd6ba0fe..0d1dd9f8 100644
--- a/server/utils/files/pfp.js
+++ b/server/utils/files/pfp.js
@@ -3,6 +3,7 @@ const fs = require("fs");
const { getType } = require("mime");
const { User } = require("../../models/user");
const { normalizePath } = require(".");
+const { Workspace } = require("../../models/workspace");
function fetchPfp(pfpPath) {
if (!fs.existsSync(pfpPath)) {
@@ -38,7 +39,21 @@ async function determinePfpFilepath(id) {
return pfpFilepath;
}
+async function determineWorkspacePfpFilepath(slug) {
+ const workspace = await Workspace.get({ slug });
+ const pfpFilename = workspace?.pfpFilename || null;
+ if (!pfpFilename) return null;
+
+ const basePath = process.env.STORAGE_DIR
+ ? path.join(process.env.STORAGE_DIR, "assets/pfp")
+ : path.join(__dirname, "../../storage/assets/pfp");
+ const pfpFilepath = path.join(basePath, normalizePath(pfpFilename));
+ if (!fs.existsSync(pfpFilepath)) return null;
+ return pfpFilepath;
+}
+
module.exports = {
fetchPfp,
determinePfpFilepath,
+ determineWorkspacePfpFilepath,
};
diff --git a/server/utils/helpers/chat/responses.js b/server/utils/helpers/chat/responses.js
index c4371d81..e2ec7bd0 100644
--- a/server/utils/helpers/chat/responses.js
+++ b/server/utils/helpers/chat/responses.js
@@ -1,6 +1,14 @@
const { v4: uuidv4 } = require("uuid");
const moment = require("moment");
+function clientAbortedHandler(resolve, fullText) {
+ console.log(
+ "\x1b[43m\x1b[34m[STREAM ABORTED]\x1b[0m Client requested to abort stream. Exiting LLM stream handler early."
+ );
+ resolve(fullText);
+ return;
+}
+
// The default way to handle a stream response. Functions best with OpenAI.
// Currently used for LMStudio, LocalAI, Mistral API, and OpenAI
function handleDefaultStreamResponse(response, stream, responseProps) {
@@ -9,6 +17,14 @@ function handleDefaultStreamResponse(response, stream, responseProps) {
return new Promise((resolve) => {
let fullText = "";
let chunk = "";
+
+ // Establish listener to early-abort a streaming response
+ // in case things go sideways or the user does not like the response.
+ // We preserve the generated text but continue as if chat was completed
+ // to preserve previously generated content.
+ const handleAbort = () => clientAbortedHandler(resolve, fullText);
+ response.on("close", handleAbort);
+
stream.data.on("data", (data) => {
const lines = data
?.toString()
@@ -52,6 +68,7 @@ function handleDefaultStreamResponse(response, stream, responseProps) {
close: true,
error: false,
});
+ response.removeListener("close", handleAbort);
resolve(fullText);
} else {
let finishReason = null;
@@ -85,6 +102,7 @@ function handleDefaultStreamResponse(response, stream, responseProps) {
close: true,
error: false,
});
+ response.removeListener("close", handleAbort);
resolve(fullText);
}
}
@@ -141,4 +159,5 @@ module.exports = {
convertToChatHistory,
convertToPromptHistory,
writeResponseChunk,
+ clientAbortedHandler,
};
diff --git a/server/yarn.lock b/server/yarn.lock
index 61b29e3a..4cfbb7ff 100644
--- a/server/yarn.lock
+++ b/server/yarn.lock
@@ -649,17 +649,17 @@
resolved "https://registry.yarnpkg.com/@pkgr/core/-/core-0.1.0.tgz#7d8dacb7fdef0e4387caf7396cbd77f179867d06"
integrity sha512-Zwq5OCzuwJC2jwqmpEQt7Ds1DTi6BWSwoGkbb1n9pO3hzb35BoJELx7c0T23iDkBGkh2e7tvOtjF3tr3OaQHDQ==
-"@prisma/client@5.3.0":
- version "5.3.0"
- resolved "https://registry.yarnpkg.com/@prisma/client/-/client-5.3.0.tgz#47f07e5639993cffcf1c740a144495410562f279"
- integrity sha512-cduYBlwj6oBfAUx2OI5i7t3NlpVeOtkN7pAqv0cw0B6gs4y8cY1mr8ZYywja0NUCOCqEWDkcZWBTVBwm6mnRIw==
+"@prisma/client@5.3.1":
+ version "5.3.1"
+ resolved "https://registry.yarnpkg.com/@prisma/client/-/client-5.3.1.tgz#fc7fc2d91e814cc4fe18a4bc5e78bf851c26985e"
+ integrity sha512-ArOKjHwdFZIe1cGU56oIfy7wRuTn0FfZjGuU/AjgEBOQh+4rDkB6nF+AGHP8KaVpkBIiHGPQh3IpwQ3xDMdO0Q==
dependencies:
- "@prisma/engines-version" "5.3.0-36.e90b936d84779543cbe0e494bc8b9d7337fad8e4"
+ "@prisma/engines-version" "5.3.1-2.61e140623197a131c2a6189271ffee05a7aa9a59"
-"@prisma/engines-version@5.3.0-36.e90b936d84779543cbe0e494bc8b9d7337fad8e4":
- version "5.3.0-36.e90b936d84779543cbe0e494bc8b9d7337fad8e4"
- resolved "https://registry.yarnpkg.com/@prisma/engines-version/-/engines-version-5.3.0-36.e90b936d84779543cbe0e494bc8b9d7337fad8e4.tgz#46ee2884e04cdba1163461ef856cec882d31c836"
- integrity sha512-uftIog5FQ/OUR8Vb9TzpNBJ6L+zJnBgmd1A0uPJUzuvGMU32UmeyobpdXVzST5UprKryTdWOYXQFVyiQ2OU4Nw==
+"@prisma/engines-version@5.3.1-2.61e140623197a131c2a6189271ffee05a7aa9a59":
+ version "5.3.1-2.61e140623197a131c2a6189271ffee05a7aa9a59"
+ resolved "https://registry.yarnpkg.com/@prisma/engines-version/-/engines-version-5.3.1-2.61e140623197a131c2a6189271ffee05a7aa9a59.tgz#7eb6f5c6b7628b8b39df55c903f411528a6f761c"
+ integrity sha512-y5qbUi3ql2Xg7XraqcXEdMHh0MocBfnBzDn5GbV1xk23S3Mq8MGs+VjacTNiBh3dtEdUERCrUUG7Z3QaJ+h79w==
"@prisma/engines@5.3.1":
version "5.3.1"
@@ -4455,7 +4455,7 @@ prettier@^3.0.3:
resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.1.1.tgz#6ba9f23165d690b6cbdaa88cb0807278f7019848"
integrity sha512-22UbSzg8luF4UuZtzgiUOfcGM8s4tjBv6dJRT7j275NXsy2jb4aJa4NNveul5x4eqlF1wuhuR2RElK71RvmVaw==
-prisma@^5.3.1:
+prisma@5.3.1:
version "5.3.1"
resolved "https://registry.yarnpkg.com/prisma/-/prisma-5.3.1.tgz#a0932c1c1a5ed4ff449d064b193d9c7e94e8bf77"
integrity sha512-Wp2msQIlMPHe+5k5Od6xnsI/WNG7UJGgFUJgqv/ygc7kOECZapcSz/iU4NIEzISs3H1W9sFLjAPbg/gOqqtB7A==