const { toChunks } = require("../../helpers");

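/**
 * Embedding provider backed by OpenAI's embedding API, using the v3 `openai`
 * SDK (`Configuration`/`OpenAIApi`) and the `text-embedding-ada-002` model.
 */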
class OpenAiEmbedder {
  constructor() {
    const { Configuration, OpenAIApi } = require("openai");
    if (!process.env.OPEN_AI_KEY) throw new Error("No OpenAI API key was set.");
    const config = new Configuration({
      apiKey: process.env.OPEN_AI_KEY,
    });
    const openai = new OpenAIApi(config);
    this.openai = openai;

    // Limit of how many strings we can process in a single pass to stay
    // within resource or network limits.
    this.maxConcurrentChunks = 500;
    this.embeddingMaxChunkLength = 1_000;
  }

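  /**
   * Embed a single piece of text.
   * @param {string} textInput - Text to embed.
   * @returns {Promise<number[]>} The embedding vector, or [] when nothing was produced.
   */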
  async embedTextInput(textInput) {
    // Wrap bare string inputs so embedChunks always receives an array of texts.
    const result = await this.embedChunks(
      Array.isArray(textInput) ? textInput : [textInput]
    );
    return result?.[0] || [];
  }

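  /**
   * Embed many text chunks in concurrent batches of up to maxConcurrentChunks.
   * @param {string[]} textChunks - Texts to embed.
   * @returns {Promise<number[][]|null>} One vector per chunk, or null if no
   * valid embeddings were returned.
   * @throws {Error} When any batch fails, since the result set would be incomplete.
   */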
  async embedChunks(textChunks = []) {
    // Because there is a hard POST limit (~8MB) on how many chunks can be sent
    // at once to OpenAI, we concurrently execute each maximum-size batch of
    // text chunks. Refer to maxConcurrentChunks in the constructor for more info.
    const embeddingRequests = [];
    for (const chunk of toChunks(textChunks, this.maxConcurrentChunks)) {
      embeddingRequests.push(
        new Promise((resolve) => {
          this.openai
            .createEmbedding({
              model: "text-embedding-ada-002",
              input: chunk,
            })
            .then((res) => {
              resolve({ data: res.data?.data, error: null });
            })
            .catch((e) => {
              // Normalize the OpenAI error into a type/message pair so the
              // aggregation below can de-duplicate and report it uniformly.
              e.type =
                e?.response?.data?.error?.code ||
                e?.response?.status ||
                "failed_to_embed";
              e.message = e?.response?.data?.error?.message || e.message;
              resolve({ data: [], error: e });
            });
        })
      );
    }

    const { data = [], error = null } = await Promise.all(
      embeddingRequests
    ).then((results) => {
      // If any errors were returned from OpenAI, abort the entire sequence
      // because the embeddings will be incomplete.
      const errors = results
        .filter((res) => !!res.error)
        .map((res) => res.error)
        .flat();
      if (errors.length > 0) {
        const uniqueErrors = new Set();
        errors.forEach((error) =>
          uniqueErrors.add(`[${error.type}]: ${error.message}`)
        );

        return {
          data: [],
          error: Array.from(uniqueErrors).join(", "),
        };
      }
      return {
        data: results.map((res) => res?.data || []).flat(),
        error: null,
      };
    });

    if (!!error) throw new Error(`OpenAI failed to embed: ${error}`);
    // Only return vectors if every result actually carries an embedding.
    return data.length > 0 &&
      data.every((embd) => embd.hasOwnProperty("embedding"))
      ? data.map((embd) => embd.embedding)
      : null;
  }
}

module.exports = {
  OpenAiEmbedder,
};
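
// A minimal usage sketch (illustrative only: the require path and surrounding
// async context are assumptions, and process.env.OPEN_AI_KEY must be set):
//
//   const { OpenAiEmbedder } = require("../EmbeddingEngines/openai");
//   const embedder = new OpenAiEmbedder();
//   const vector = await embedder.embedTextInput("hello world"); // number[]
//   const vectors = await embedder.embedChunks(["doc one", "doc two"]); // number[][] | null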