anything-llm/server/utils/vectorDbProviders/qdrant/index.js

const { QdrantClient } = require("@qdrant/js-client-rest");
const { TextSplitter } = require("../../TextSplitter");
const { SystemSettings } = require("../../../models/systemSettings");
const { storeVectorResult, cachedVectorInformation } = require("../../files");
const { v4: uuidv4 } = require("uuid");
const { toChunks, getEmbeddingEngineSelection } = require("../../helpers");
const { sourceIdentifier } = require("../../chats");
const QDrant = {
name: "QDrant",
connect: async function () {
if (process.env.VECTOR_DB !== "qdrant")
throw new Error("QDrant::Invalid ENV settings");
const client = new QdrantClient({
url: process.env.QDRANT_ENDPOINT,
...(process.env.QDRANT_API_KEY
? { apiKey: process.env.QDRANT_API_KEY }
: {}),
});
const isAlive = (await client.api("cluster")?.clusterStatus())?.ok || false;
if (!isAlive)
throw new Error(
"QDrant::Invalid Heartbeat received - is the instance online?"
);
return { client };
},
heartbeat: async function () {
await this.connect();
return { heartbeat: Number(new Date()) };
},
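// Sums the vector counts of every collection in the Qdrant instance.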
totalVectors: async function () {
const { client } = await this.connect();
const { collections } = await client.getCollections();
let totalVectors = 0;
for (const collection of collections) {
if (!collection || !collection.name) continue;
totalVectors +=
(await this.namespace(client, collection.name))?.vectorCount || 0;
}
return totalVectors;
},
namespaceCount: async function (_namespace = null) {
const { client } = await this.connect();
const namespace = await this.namespace(client, _namespace);
return namespace?.vectorCount || 0;
},
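// Runs a vector search against the given collection (namespace), keeping only results
// that meet the similarity threshold and are not excluded by filterIdentifiers
// (e.g. sources whose parent document is pinned).
// Note: the passed _client is unused; a fresh connection is made internally.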
similarityResponse: async function (
_client,
namespace,
queryVector,
similarityThreshold = 0.25,
topN = 4,
filterIdentifiers = []
) {
const { client } = await this.connect();
const result = {
contextTexts: [],
sourceDocuments: [],
scores: [],
};
const responses = await client.search(namespace, {
vector: queryVector,
limit: topN,
with_payload: true,
});
responses.forEach((response) => {
if (response.score < similarityThreshold) return;
if (filterIdentifiers.includes(sourceIdentifier(response?.payload))) {
console.log(
"QDrant: A source was filtered from context as it's parent document is pinned."
);
return;
}
result.contextTexts.push(response?.payload?.text || "");
result.sourceDocuments.push({
...(response?.payload || {}),
id: response.id,
});
result.scores.push(response.score);
});
return result;
},
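// Returns the collection details for a namespace along with an exact vector count,
// or null if the collection does not exist.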
namespace: async function (client, namespace = null) {
if (!namespace) throw new Error("No namespace value provided.");
const collection = await client.getCollection(namespace).catch(() => null);
if (!collection) return null;
return {
name: namespace,
...collection,
vectorCount: (await client.count(namespace, { exact: true })).count,
};
},
hasNamespace: async function (namespace = null) {
if (!namespace) return false;
const { client } = await this.connect();
return await this.namespaceExists(client, namespace);
},
namespaceExists: async function (client, namespace = null) {
if (!namespace) throw new Error("No namespace value provided.");
const collection = await client.getCollection(namespace).catch((e) => {
console.error("QDrant::namespaceExists", e.message);
return null;
});
return !!collection;
},
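// Removes all vectors for a namespace by dropping the collection entirely.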
deleteVectorsInNamespace: async function (client, namespace = null) {
await client.deleteCollection(namespace);
return true;
},
// QDrant requires a vector dimension when creating a collection.
// Like other providers, we infer it from the first chunk passed in.
getOrCreateCollection: async function (client, namespace, dimensions = null) {
if (await this.namespaceExists(client, namespace)) {
return await client.getCollection(namespace);
}
if (!dimensions)
throw new Error(
`Qdrant:getOrCreateCollection Unable to infer vector dimension from input. Open an issue on Github for support.`
);
await client.createCollection(namespace, {
vectors: {
size: dimensions,
distance: "Cosine",
},
});
return await client.getCollection(namespace);
},
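// Embeds a document and stores its vectors in the namespace's collection.
// If cached vector information exists (and skipCache is false), the cached chunks are
// replayed into Qdrant; otherwise the document is split, embedded, and upserted fresh.
// Every inserted vector id is recorded in DocumentVectors so the document can be removed later.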
addDocumentToNamespace: async function (
namespace,
documentData = {},
fullFilePath = null,
skipCache = false
) {
const { DocumentVectors } = require("../../../models/vectors");
try {
let vectorDimension = null;
const { pageContent, docId, ...metadata } = documentData;
if (!pageContent || pageContent.length == 0) return false;
console.log("Adding new vectorized document into namespace", namespace);
if (!skipCache) {
const cacheResult = await cachedVectorInformation(fullFilePath);
if (cacheResult.exists) {
const { client } = await this.connect();
const { chunks } = cacheResult;
const documentVectors = [];
vectorDimension =
chunks[0][0]?.vector?.length ??
chunks[0][0]?.values?.length ??
null;
const collection = await this.getOrCreateCollection(
client,
namespace,
vectorDimension
);
if (!collection)
throw new Error("Failed to create new QDrant collection!", {
namespace,
});
for (const chunk of chunks) {
const submission = {
ids: [],
vectors: [],
payloads: [],
};
// Before sending to Qdrant and saving the records to our db,
// we need to assign a new id to each chunk stored in the cached file.
// The id property must be defined on the payload or the chunk cannot be managed by ALLM and is omitted.
chunk.forEach((chunk) => {
const id = uuidv4();
if (chunk?.payload?.hasOwnProperty("id")) {
const { id: _id, ...payload } = chunk.payload;
documentVectors.push({ docId, vectorId: id });
submission.ids.push(id);
submission.vectors.push(chunk.vector);
submission.payloads.push(payload);
} else {
console.error(
"The 'id' property is not defined in chunk.payload - it will be omitted from being inserted in QDrant collection."
);
}
});
const additionResult = await client.upsert(namespace, {
wait: true,
batch: { ...submission },
});
if (additionResult?.status !== "completed")
throw new Error("Error embedding into QDrant", additionResult);
}
await DocumentVectors.bulkInsert(documentVectors);
return { vectorized: true, error: null };
}
}
// If we are here then we are going to embed and store a novel document.
// We have to do this manually as opposed to using LangChain's `Qdrant.fromDocuments`
// because otherwise we cannot atomically control our namespace to granularly find/remove
// documents from the vector DB.
const EmbedderEngine = getEmbeddingEngineSelection();
const textSplitter = new TextSplitter({
chunkSize: TextSplitter.determineMaxChunkSize(
await SystemSettings.getValueOrFallback({
label: "text_splitter_chunk_size",
}),
EmbedderEngine?.embeddingMaxChunkLength
),
chunkOverlap: await SystemSettings.getValueOrFallback(
{ label: "text_splitter_chunk_overlap" },
20
),
chunkHeaderMeta: TextSplitter.buildHeaderMeta(metadata),
});
const textChunks = await textSplitter.splitText(pageContent);
console.log("Chunks created from document:", textChunks.length);
const documentVectors = [];
const vectors = [];
const vectorValues = await EmbedderEngine.embedChunks(textChunks);
const submission = {
ids: [],
vectors: [],
payloads: [],
};
if (!!vectorValues && vectorValues.length > 0) {
for (const [i, vector] of vectorValues.entries()) {
if (!vectorDimension) vectorDimension = vector.length;
const vectorRecord = {
id: uuidv4(),
vector: vector,
// [DO NOT REMOVE]
// LangChain will be unable to find your text if you embed manually and don't include the `text` key.
// https://github.com/hwchase17/langchainjs/blob/2def486af734c0ca87285a48f1a04c057ab74bdf/langchain/src/vectorstores/pinecone.ts#L64
payload: { ...metadata, text: textChunks[i] },
};
submission.ids.push(vectorRecord.id);
submission.vectors.push(vectorRecord.vector);
submission.payloads.push(vectorRecord.payload);
vectors.push(vectorRecord);
documentVectors.push({ docId, vectorId: vectorRecord.id });
}
} else {
throw new Error(
"Could not embed document chunks! This document will not be recorded."
);
}
const { client } = await this.connect();
const collection = await this.getOrCreateCollection(
client,
namespace,
vectorDimension
);
if (!collection)
throw new Error("Failed to create new QDrant collection!", {
namespace,
});
if (vectors.length > 0) {
const chunks = [];
console.log("Inserting vectorized chunks into QDrant collection.");
for (const chunk of toChunks(vectors, 500)) chunks.push(chunk);
const additionResult = await client.upsert(namespace, {
wait: true,
batch: {
ids: submission.ids,
vectors: submission.vectors,
payloads: submission.payloads,
},
});
if (additionResult?.status !== "completed")
throw new Error("Error embedding into QDrant", additionResult);
await storeVectorResult(chunks, fullFilePath);
}
await DocumentVectors.bulkInsert(documentVectors);
return { vectorized: true, error: null };
} catch (e) {
console.error("addDocumentToNamespace", e.message);
return { vectorized: false, error: e.message };
}
},
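// Deletes all vectors belonging to a document from the namespace's collection
// and removes the matching DocumentVectors records.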
deleteDocumentFromNamespace: async function (namespace, docId) {
const { DocumentVectors } = require("../../../models/vectors");
const { client } = await this.connect();
if (!(await this.namespaceExists(client, namespace))) return;
const knownDocuments = await DocumentVectors.where({ docId });
if (knownDocuments.length === 0) return;
const vectorIds = knownDocuments.map((doc) => doc.vectorId);
await client.delete(namespace, {
wait: true,
points: vectorIds,
});
const indexes = knownDocuments.map((doc) => doc.id);
await DocumentVectors.deleteIds(indexes);
return true;
},
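// Embeds the user input via the LLMConnector and returns the matching context texts
// and curated sources for the namespace, or an error message if the namespace has no documents.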
performSimilaritySearch: async function ({
namespace = null,
input = "",
LLMConnector = null,
similarityThreshold = 0.25,
topN = 4,
filterIdentifiers = [],
}) {
if (!namespace || !input || !LLMConnector)
throw new Error("Invalid request to performSimilaritySearch.");
const { client } = await this.connect();
if (!(await this.namespaceExists(client, namespace))) {
return {
contextTexts: [],
sources: [],
message: "Invalid query - no documents found for workspace!",
};
}
const queryVector = await LLMConnector.embedTextInput(input);
const { contextTexts, sourceDocuments } = await this.similarityResponse(
client,
namespace,
queryVector,
similarityThreshold,
topN,
filterIdentifiers
);
const sources = sourceDocuments.map((metadata, i) => {
return { ...metadata, text: contextTexts[i] };
});
return {
contextTexts,
sources: this.curateSources(sources),
message: false,
};
},
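// Returns the collection details and exact vector count for a namespace.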
"namespace-stats": async function (reqBody = {}) {
const { namespace = null } = reqBody;
if (!namespace) throw new Error("namespace required");
const { client } = await this.connect();
if (!(await this.namespaceExists(client, namespace)))
throw new Error("Namespace by that name does not exist.");
const stats = await this.namespace(client, namespace);
return stats
? stats
: { message: "No stats could be fetched from the DB for this namespace." };
},
"delete-namespace": async function (reqBody = {}) {
const { namespace = null } = reqBody;
const { client } = await this.connect();
if (!(await this.namespaceExists(client, namespace)))
throw new Error("Namespace by that name does not exist.");
const details = await this.namespace(client, namespace);
await this.deleteVectorsInNamespace(client, namespace);
return {
message: `Namespace ${namespace} was deleted along with ${details?.vectorCount} vectors.`,
};
},
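// Drops every collection in the Qdrant instance.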
reset: async function () {
const { client } = await this.connect();
const response = await client.getCollections();
for (const collection of response.collections) {
await client.deleteCollection(collection.name);
}
return { reset: true };
},
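// Normalizes raw source documents down to their metadata payloads
// (or the document itself when no metadata key is present).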
curateSources: function (sources = []) {
const documents = [];
for (const source of sources) {
if (Object.keys(source).length > 0) {
const metadata = source.hasOwnProperty("metadata")
? source.metadata
: source;
documents.push({
...metadata,
});
}
}
return documents;
},
};
module.exports.QDrant = QDrant;