)}
+ {vectorDB === "qdrant" && (
+ <>
+
+
+
+
+
+
+
+
+ </>
+ )}
{vectorDB === "weaviate" && (
<>
diff --git a/frontend/src/media/vectordbs/qdrant.png b/frontend/src/media/vectordbs/qdrant.png
new file mode 100644
index 00000000..d63e720c
Binary files /dev/null and b/frontend/src/media/vectordbs/qdrant.png differ
diff --git a/server/.env.example b/server/.env.example
index 606dd898..ff92295e 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -36,6 +36,11 @@ PINECONE_INDEX=
# WEAVIATE_ENDPOINT="http://localhost:8080"
# WEAVIATE_API_KEY=
+# Enable all below if you are using vector database: Qdrant.
+# VECTOR_DB="qdrant"
+# QDRANT_ENDPOINT="http://localhost:6333"
+# QDRANT_API_KEY=
+
# CLOUD DEPLOYMENT VARIRABLES ONLY
# AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting.
diff --git a/server/endpoints/system.js b/server/endpoints/system.js
index 73041e18..d0b48cc6 100644
--- a/server/endpoints/system.js
+++ b/server/endpoints/system.js
@@ -78,6 +78,12 @@ function systemEndpoints(app) {
WeaviateApiKey: process.env.WEAVIATE_API_KEY,
}
: {}),
+ ...(vectorDB === "qdrant"
+ ? {
+ QdrantEndpoint: process.env.QDRANT_ENDPOINT,
+ QdrantApiKey: process.env.QDRANT_API_KEY,
+ }
+ : {}),
LLMProvider: llmProvider,
...(llmProvider === "openai"
? {
diff --git a/server/package.json b/server/package.json
index b2f5bdc8..15bbff6f 100644
--- a/server/package.json
+++ b/server/package.json
@@ -18,6 +18,7 @@
"@azure/openai": "^1.0.0-beta.3",
"@googleapis/youtube": "^9.0.0",
"@pinecone-database/pinecone": "^0.1.6",
+ "@qdrant/js-client-rest": "^1.4.0",
"archiver": "^5.3.1",
"bcrypt": "^5.1.0",
"body-parser": "^1.20.2",
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index b7fb5ae0..b077606a 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -13,6 +13,9 @@ function getVectorDbClass() {
case "weaviate":
const { Weaviate } = require("../vectorDbProviders/weaviate");
return Weaviate;
+ case "qdrant":
+ const { QDrant } = require("../vectorDbProviders/qdrant");
+ return QDrant;
default:
throw new Error("ENV: No VECTOR_DB value found in environment!");
}
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 9f00ec42..d08f25c7 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -47,6 +47,14 @@ const KEY_MAPPING = {
envKey: "WEAVIATE_API_KEY",
checks: [],
},
+ QdrantEndpoint: {
+ envKey: "QDRANT_ENDPOINT",
+ checks: [isValidURL],
+ },
+ QdrantApiKey: {
+ envKey: "QDRANT_API_KEY",
+ checks: [],
+ },
PineConeEnvironment: {
envKey: "PINECONE_ENVIRONMENT",
@@ -112,7 +120,7 @@ function validOpenAIModel(input = "") {
}
function supportedVectorDB(input = "") {
- const supported = ["chroma", "pinecone", "lancedb", "weaviate"];
+ const supported = ["chroma", "pinecone", "lancedb", "weaviate", "qdrant"];
return supported.includes(input)
? null
: `Invalid VectorDB type. Must be one of ${supported.join(", ")}.`;
diff --git a/server/utils/vectorDbProviders/qdrant/index.js b/server/utils/vectorDbProviders/qdrant/index.js
new file mode 100644
index 00000000..0dc39e79
--- /dev/null
+++ b/server/utils/vectorDbProviders/qdrant/index.js
@@ -0,0 +1,397 @@
+const { QdrantClient } = require("@qdrant/js-client-rest");
+const { RecursiveCharacterTextSplitter } = require("langchain/text_splitter");
+const { storeVectorResult, cachedVectorInformation } = require("../../files");
+const { v4: uuidv4 } = require("uuid");
+const { toChunks, getLLMProvider } = require("../../helpers");
+const { chatPrompt } = require("../../chats");
+
+const QDrant = {
+ name: "QDrant",
+ connect: async function () {
+ if (process.env.VECTOR_DB !== "qdrant")
+ throw new Error("QDrant::Invalid ENV settings");
+
+ const client = new QdrantClient({
+ url: process.env.QDRANT_ENDPOINT,
+ ...(process.env.QDRANT_API_KEY
+ ? { apiKey: process.env.QDRANT_API_KEY }
+ : {}),
+ });
+
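+ // Use the cluster status response as a simple liveness probe for the instance.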
+ const isAlive = (await client.api("cluster")?.clusterStatus())?.ok || false;
+ if (!isAlive)
+ throw new Error(
+ "QDrant::Invalid Heartbeat received - is the instance online?"
+ );
+
+ return { client };
+ },
+ heartbeat: async function () {
+ await this.connect();
+ return { heartbeat: Number(new Date()) };
+ },
+ totalIndicies: async function () {
+ const { client } = await this.connect();
+ const { collections } = await client.getCollections();
+ var totalVectors = 0;
+ for (const collection of collections) {
+ if (!collection || !collection.name) continue;
+ totalVectors +=
+ (await this.namespace(client, collection.name))?.vectorCount || 0;
+ }
+ return totalVectors;
+ },
+ namespaceCount: async function (_namespace = null) {
+ const { client } = await this.connect();
+ const namespace = await this.namespace(client, _namespace);
+ return namespace?.vectorCount || 0;
+ },
+ similarityResponse: async function (client, namespace, queryVector) {
+ const result = {
+ contextTexts: [],
+ sourceDocuments: [],
+ };
+
+ const responses = await client.search(namespace, {
+ vector: queryVector,
+ limit: 4,
+ });
+
+ responses.forEach((response) => {
+ result.contextTexts.push(response?.payload?.text || "");
+ result.sourceDocuments.push({
+ ...(response?.payload || {}),
+ id: response.id,
+ });
+ });
+
+ return result;
+ },
+ namespace: async function (client, namespace = null) {
+ if (!namespace) throw new Error("No namespace value provided.");
+ const collection = await client.getCollection(namespace).catch(() => null);
+ if (!collection) return null;
+
+ return {
+ name: namespace,
+ ...collection,
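+ // Qdrant reports counts in snake_case; alias vectors_count as vectorCount since the rest of this provider reads that key.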
+ vectorCount: collection.vectors_count,
+ };
+ },
+ hasNamespace: async function (namespace = null) {
+ if (!namespace) return false;
+ const { client } = await this.connect();
+ return await this.namespaceExists(client, namespace);
+ },
+ namespaceExists: async function (client, namespace = null) {
+ if (!namespace) throw new Error("No namespace value provided.");
+ const collection = await client.getCollection(namespace).catch((e) => {
+ console.error("QDrant::namespaceExists", e.message);
+ return null;
+ });
+ return !!collection;
+ },
+ deleteVectorsInNamespace: async function (client, namespace = null) {
+ await client.deleteCollection(namespace);
+ return true;
+ },
+ getOrCreateCollection: async function (client, namespace) {
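+ // Each namespace is backed by its own Qdrant collection, created lazily on first use.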
+ if (await this.namespaceExists(client, namespace)) {
+ return await client.getCollection(namespace);
+ }
+ await client.createCollection(namespace, {
+ vectors: {
+ size: 1536, //TODO: Fixed to OpenAI models - when other embeddings exist make variable.
+ distance: "Cosine",
+ },
+ });
+ return await client.getCollection(namespace);
+ },
+ addDocumentToNamespace: async function (
+ namespace,
+ documentData = {},
+ fullFilePath = null
+ ) {
+ const { DocumentVectors } = require("../../../models/vectors");
+ try {
+ const { pageContent, docId, ...metadata } = documentData;
+ if (!pageContent || pageContent.length == 0) return false;
+
+ console.log("Adding new vectorized document into namespace", namespace);
+ const cacheResult = await cachedVectorInformation(fullFilePath);
+ if (cacheResult.exists) {
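+ // Cache hit: re-insert the previously embedded chunks (with fresh vector ids) instead of re-embedding the document.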
+ const { client } = await this.connect();
+ const collection = await this.getOrCreateCollection(client, namespace);
+ if (!collection)
+ throw new Error("Failed to create new QDrant collection!", {
+ namespace,
+ });
+
+ const { chunks } = cacheResult;
+ const documentVectors = [];
+
+ for (const chunk of chunks) {
+ const submission = {
+ ids: [],
+ vectors: [],
+ payloads: [],
+ };
+
+ // Before sending to Qdrant and saving the records to our db
+ // we need to assign the id of each chunk that is stored in the cached file.
+ chunk.forEach((record) => {
+ const id = uuidv4();
+ const { id: _id, ...payload } = record.payload;
+ documentVectors.push({ docId, vectorId: id });
+ submission.ids.push(id);
+ submission.vectors.push(record.vector);
+ submission.payloads.push(payload);
+ });
+
+ const additionResult = await client.upsert(namespace, {
+ wait: true,
+ batch: { ...submission },
+ });
+ if (additionResult?.status !== "completed")
+ throw new Error("Error embedding into QDrant", additionResult);
+ }
+
+ await DocumentVectors.bulkInsert(documentVectors);
+ return true;
+ }
+
+ // If we are here then we are embedding and storing a brand-new document.
+ // We do this manually instead of using LangChain's `Qdrant.fromDocuments`
+ // because otherwise we cannot atomically control our namespace to granularly
+ // find/remove documents from the vector DB.
+ const textSplitter = new RecursiveCharacterTextSplitter({
+ chunkSize: 1000,
+ chunkOverlap: 20,
+ });
+ const textChunks = await textSplitter.splitText(pageContent);
+
+ console.log("Chunks created from document:", textChunks.length);
+ const LLMConnector = getLLMProvider();
+ const documentVectors = [];
+ const vectors = [];
+ const vectorValues = await LLMConnector.embedChunks(textChunks);
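+ // Qdrant's batch upsert expects parallel arrays of ids, vectors, and payloads.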
+ const submission = {
+ ids: [],
+ vectors: [],
+ payloads: [],
+ };
+
+ if (!!vectorValues && vectorValues.length > 0) {
+ for (const [i, vector] of vectorValues.entries()) {
+ const vectorRecord = {
+ id: uuidv4(),
+ vector: vector,
+ // [DO NOT REMOVE]
+ // LangChain will be unable to find your text if you embed manually and don't include the `text` key.
+ // https://github.com/hwchase17/langchainjs/blob/2def486af734c0ca87285a48f1a04c057ab74bdf/langchain/src/vectorstores/pinecone.ts#L64
+ payload: { ...metadata, text: textChunks[i] },
+ };
+
+ submission.ids.push(vectorRecord.id);
+ submission.vectors.push(vectorRecord.vector);
+ submission.payloads.push(vectorRecord.payload);
+
+ vectors.push(vectorRecord);
+ documentVectors.push({ docId, vectorId: vectorRecord.id });
+ }
+ } else {
+ console.error(
+ "Could not use OpenAI to embed document chunks! This document will not be recorded."
+ );
+ }
+
+ const { client } = await this.connect();
+ const collection = await this.getOrCreateCollection(client, namespace);
+ if (!collection)
+ throw new Error("Failed to create new QDrant collection!", {
+ namespace,
+ });
+
+ if (vectors.length > 0) {
+ const chunks = [];
+
+ console.log("Inserting vectorized chunks into QDrant collection.");
+ for (const chunk of toChunks(vectors, 500)) chunks.push(chunk);
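+ // The chunked copies above only feed the local vector cache; the upsert below sends the full batch in a single request.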
+
+ const additionResult = await client.upsert(namespace, {
+ wait: true,
+ batch: {
+ ids: submission.ids,
+ vectors: submission.vectors,
+ payloads: submission.payloads,
+ },
+ });
+ if (additionResult?.status !== "completed")
+ throw new Error("Error embedding into QDrant", additionResult);
+
+ await storeVectorResult(chunks, fullFilePath);
+ }
+
+ await DocumentVectors.bulkInsert(documentVectors);
+ return true;
+ } catch (e) {
+ console.error("addDocumentToNamespace", e.message);
+ return false;
+ }
+ },
+ deleteDocumentFromNamespace: async function (namespace, docId) {
+ const { DocumentVectors } = require("../../../models/vectors");
+ const { client } = await this.connect();
+ if (!(await this.namespaceExists(client, namespace))) return;
+
+ const knownDocuments = await DocumentVectors.where(`docId = '${docId}'`);
+ if (knownDocuments.length === 0) return;
+
+ const vectorIds = knownDocuments.map((doc) => doc.vectorId);
+ await client.delete(namespace, {
+ wait: true,
+ points: vectorIds,
+ });
+
+ const indexes = knownDocuments.map((doc) => doc.id);
+ await DocumentVectors.deleteIds(indexes);
+ return true;
+ },
+ query: async function (reqBody = {}) {
+ const { namespace = null, input, workspace = {} } = reqBody;
+ if (!namespace || !input) throw new Error("Invalid request body");
+
+ const { client } = await this.connect();
+ if (!(await this.namespaceExists(client, namespace))) {
+ return {
+ response: null,
+ sources: [],
+ message: "Invalid query - no documents found for workspace!",
+ };
+ }
+
+ const LLMConnector = getLLMProvider();
+ const queryVector = await LLMConnector.embedTextInput(input);
+ const { contextTexts, sourceDocuments } = await this.similarityResponse(
+ client,
+ namespace,
+ queryVector
+ );
+ const prompt = {
+ role: "system",
+ content: `${chatPrompt(workspace)}
+ Context:
+ ${contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")}`,
+ };
+ const memory = [prompt, { role: "user", content: input }];
+ const responseText = await LLMConnector.getChatCompletion(memory, {
+ temperature: workspace?.openAiTemp ?? 0.7,
+ });
+
+ return {
+ response: responseText,
+ sources: this.curateSources(sourceDocuments),
+ message: false,
+ };
+ },
+ // This implementation of chat uses the chat history and modifies the system prompt at execution time.
+ // This is an improvement over the regular LangChain implementation because chats never directly modify the embeddings.
+ // Otherwise, with multi-user support, every conversation would mutate the base vector collection, and the only
+ // workaround would be replicating an entire vector database per user - which would very quickly consume space on the vector DB.
+ chat: async function (reqBody = {}) {
+ const {
+ namespace = null,
+ input,
+ workspace = {},
+ chatHistory = [],
+ } = reqBody;
+ if (!namespace || !input) throw new Error("Invalid request body");
+
+ const { client } = await this.connect();
+ if (!(await this.namespaceExists(client, namespace))) {
+ return {
+ response: null,
+ sources: [],
+ message: "Invalid query - no documents found for workspace!",
+ };
+ }
+
+ const LLMConnector = getLLMProvider();
+ const queryVector = await LLMConnector.embedTextInput(input);
+ const { contextTexts, sourceDocuments } = await this.similarityResponse(
+ client,
+ namespace,
+ queryVector
+ );
+ const prompt = {
+ role: "system",
+ content: `${chatPrompt(workspace)}
+ Context:
+ ${contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")}`,
+ };
+ const memory = [prompt, ...chatHistory, { role: "user", content: input }];
+ const responseText = await LLMConnector.getChatCompletion(memory, {
+ temperature: workspace?.openAiTemp ?? 0.7,
+ });
+
+ return {
+ response: responseText,
+ sources: this.curateSources(sourceDocuments),
+ message: false,
+ };
+ },
+ "namespace-stats": async function (reqBody = {}) {
+ const { namespace = null } = reqBody;
+ if (!namespace) throw new Error("namespace required");
+ const { client } = await this.connect();
+ if (!(await this.namespaceExists(client, namespace)))
+ throw new Error("Namespace by that name does not exist.");
+ const stats = await this.namespace(client, namespace);
+ return stats
+ ? stats
+ : { message: "No stats were able to be fetched from DB for namespace" };
+ },
+ "delete-namespace": async function (reqBody = {}) {
+ const { namespace = null } = reqBody;
+ const { client } = await this.connect();
+ if (!(await this.namespaceExists(client, namespace)))
+ throw new Error("Namespace by that name does not exist.");
+
+ const details = await this.namespace(client, namespace);
+ await this.deleteVectorsInNamespace(client, namespace);
+ return {
+ message: `Namespace ${namespace} was deleted along with ${details?.vectorCount} vectors.`,
+ };
+ },
+ reset: async function () {
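+ // Destructive: drops every collection on the connected Qdrant instance.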
+ const { client } = await this.connect();
+ const response = await client.getCollections();
+ for (const collection of response.collections) {
+ await client.deleteCollection(collection.name);
+ }
+ return { reset: true };
+ },
+ curateSources: function (sources = []) {
+ const documents = [];
+ for (const source of sources) {
+ if (Object.keys(source).length > 0) {
+ documents.push({
+ ...source,
+ });
+ }
+ }
+
+ return documents;
+ },
+};
+
+module.exports.QDrant = QDrant;
diff --git a/server/yarn.lock b/server/yarn.lock
index 2ff2aec4..6a9e1669 100644
--- a/server/yarn.lock
+++ b/server/yarn.lock
@@ -173,6 +173,25 @@
dependencies:
cross-fetch "^3.1.5"
+"@qdrant/js-client-rest@^1.4.0":
+ version "1.4.0"
+ resolved "https://registry.yarnpkg.com/@qdrant/js-client-rest/-/js-client-rest-1.4.0.tgz#efd341a9a30b241e7e11f773b581b3102db1adc6"
+ integrity sha512-I3pCKnaVdqiVpZ9+XtEjCx7IQSJnerXffD/g8mj/fZsOOJH3IFM+nF2izOfVIByufAArW+drGcAPrxHedba99w==
+ dependencies:
+ "@qdrant/openapi-typescript-fetch" "^1.2.1"
+ "@sevinf/maybe" "^0.5.0"
+ undici "^5.22.1"
+
+"@qdrant/openapi-typescript-fetch@^1.2.1":
+ version "1.2.1"
+ resolved "https://registry.yarnpkg.com/@qdrant/openapi-typescript-fetch/-/openapi-typescript-fetch-1.2.1.tgz#6e232899ca0a7fbc769f0c3a229b56f93da39f19"
+ integrity sha512-oiBJRN1ME7orFZocgE25jrM3knIF/OKJfMsZPBbtMMKfgNVYfps0MokGvSJkBmecj6bf8QoLXWIGlIoaTM4Zmw==
+
+"@sevinf/maybe@^0.5.0":
+ version "0.5.0"
+ resolved "https://registry.yarnpkg.com/@sevinf/maybe/-/maybe-0.5.0.tgz#e59fcea028df615fe87d708bb30e1f338e46bb44"
+ integrity sha512-ARhyoYDnY1LES3vYI0fiG6e9esWfTNcXcO6+MPJJXcnyMV3bim4lnFt45VXouV7y82F4x3YH8nOQ6VztuvUiWg==
+
"@tootallnate/once@1":
version "1.1.2"
resolved "https://registry.yarnpkg.com/@tootallnate/once/-/once-1.1.2.tgz#ccb91445360179a04e7fe6aff78c00ffc1eeaf82"
@@ -526,7 +545,7 @@ buffer@^5.5.0:
base64-js "^1.3.1"
ieee754 "^1.1.13"
-busboy@^1.0.0:
+busboy@^1.0.0, busboy@^1.6.0:
version "1.6.0"
resolved "https://registry.yarnpkg.com/busboy/-/busboy-1.6.0.tgz#966ea36a9502e43cdb9146962523b92f531f6893"
integrity sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==
@@ -2505,6 +2524,13 @@ undefsafe@^2.0.5:
resolved "https://registry.yarnpkg.com/undefsafe/-/undefsafe-2.0.5.tgz#38733b9327bdcd226db889fb723a6efd162e6e2c"
integrity sha512-WxONCrssBM8TSPRqN5EmsjVrsv4A8X12J4ArBiiayv3DyyG3ZlIg6yysuuSYdZsVz3TKcTg2fd//Ujd4CHV1iA==
+undici@^5.22.1:
+ version "5.23.0"
+ resolved "https://registry.yarnpkg.com/undici/-/undici-5.23.0.tgz#e7bdb0ed42cebe7b7aca87ced53e6eaafb8f8ca0"
+ integrity sha512-1D7w+fvRsqlQ9GscLBwcAJinqcZGHUKjbOmXdlE/v8BvEGXjeWAax+341q44EuTcHXXnfyKNbKRq4Lg7OzhMmg==
+ dependencies:
+ busboy "^1.6.0"
+
unique-filename@^1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/unique-filename/-/unique-filename-1.1.1.tgz#1d69769369ada0583103a1e6ae87681b56573230"