@@ -166,7 +173,7 @@ function LLMSelection({ nextStep, prevStep, currentStep }) {
- Chat Model Deployment Name
+ Chat Deployment Name
- Embedding Model Deployment Name
+ Embedding Deployment Name
)}
- {llmChoice === "anthropic-claude-2" && (
-
-
- This provider is unavailable and cannot be used in AnythingLLM
- currently.
-
+ {llmChoice === "anthropic" && (
+
+
+
+
+ Anthropic Claude-2 API Key
+
+
+
+
+
+
+ Chat Model Selection
+
+
+ {["claude-2"].map((model) => {
+ return (
+
+ {model}
+
+ );
+ })}
+
+
+
)}
diff --git a/frontend/src/pages/OnboardingFlow/OnboardingModal/index.jsx b/frontend/src/pages/OnboardingFlow/OnboardingModal/index.jsx
index 0cdf05fa..a412ecc5 100644
--- a/frontend/src/pages/OnboardingFlow/OnboardingModal/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/OnboardingModal/index.jsx
@@ -7,6 +7,7 @@ import UserModeSelection from "./Steps/UserModeSelection";
import PasswordProtection from "./Steps/PasswordProtection";
import MultiUserSetup from "./Steps/MultiUserSetup";
import CreateFirstWorkspace from "./Steps/CreateFirstWorkspace";
+import EmbeddingSelection from "./Steps/EmbeddingSelection";
const DIALOG_ID = "onboarding-modal";
@@ -54,6 +55,12 @@ const STEPS = {
description: "To get started, create a new workspace.",
component: CreateFirstWorkspace,
},
+ 8: {
+ title: "Embedding Preference",
+ description:
+ "Due to your LLM selection you need to set up a provider for embedding files and text.",
+ component: EmbeddingSelection,
+ },
};
export const OnboardingModalId = DIALOG_ID;
diff --git a/frontend/src/utils/paths.js b/frontend/src/utils/paths.js
index f914f63b..1d77abe5 100644
--- a/frontend/src/utils/paths.js
+++ b/frontend/src/utils/paths.js
@@ -43,6 +43,9 @@ export default {
llmPreference: () => {
return "/general/llm-preference";
},
+ embeddingPreference: () => {
+ return "/general/embedding-preference";
+ },
vectorDatabase: () => {
return "/general/vector-database";
},
diff --git a/server/.env.example b/server/.env.example
index 2f6ef041..d7a9cbe7 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -5,9 +5,9 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
###########################################
######## LLM API SElECTION ################
###########################################
-LLM_PROVIDER='openai'
+# LLM_PROVIDER='openai'
# OPEN_AI_KEY=
-OPEN_MODEL_PREF='gpt-3.5-turbo'
+# OPEN_MODEL_PREF='gpt-3.5-turbo'
# LLM_PROVIDER='azure'
# AZURE_OPENAI_ENDPOINT=
@@ -15,6 +15,17 @@ OPEN_MODEL_PREF='gpt-3.5-turbo'
# OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model.
# EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
+# LLM_PROVIDER='anthropic'
+# ANTHROPIC_API_KEY=sk-ant-xxxx
+# ANTHROPIC_MODEL_PREF='claude-2'
+
+###########################################
+######## Embedding API SElECTION ##########
+###########################################
+# Only used if you are using an LLM that does not natively support embedding (openai or Azure)
+# EMBEDDING_ENGINE='openai'
+# OPEN_AI_KEY=sk-xxxx
+
###########################################
######## Vector Database Selection ########
###########################################
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index 836d5907..4d2f73b3 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -24,6 +24,7 @@ const SystemSettings = {
StorageDir: process.env.STORAGE_DIR,
MultiUserMode: await this.isMultiUserMode(),
VectorDB: vectorDB,
+ EmbeddingEngine: process.env.EMBEDDING_ENGINE,
...(vectorDB === "pinecone"
? {
PineConeEnvironment: process.env.PINECONE_ENVIRONMENT,
@@ -66,6 +67,19 @@ const SystemSettings = {
AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
}
: {}),
+
+ ...(llmProvider === "anthropic"
+ ? {
+ AnthropicApiKey: !!process.env.ANTHROPIC_API_KEY,
+ AnthropicModelPref: process.env.ANTHROPIC_MODEL_PREF || "claude-2",
+
+ // For embedding credentials when Anthropic is selected.
+ OpenAiKey: !!process.env.OPEN_AI_KEY,
+ AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
+ AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
+ AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
+ }
+ : {}),
};
},
diff --git a/server/package.json b/server/package.json
index eb2e8470..62879b83 100644
--- a/server/package.json
+++ b/server/package.json
@@ -20,6 +20,7 @@
"seed": "node prisma/seed.js"
},
"dependencies": {
+ "@anthropic-ai/sdk": "^0.8.1",
"@azure/openai": "^1.0.0-beta.3",
"@googleapis/youtube": "^9.0.0",
"@pinecone-database/pinecone": "^0.1.6",
@@ -59,4 +60,4 @@
"nodemon": "^2.0.22",
"prettier": "^2.4.1"
}
-}
\ No newline at end of file
+}
diff --git a/server/utils/AiProviders/anthropic/index.js b/server/utils/AiProviders/anthropic/index.js
new file mode 100644
index 00000000..d3dd68f2
--- /dev/null
+++ b/server/utils/AiProviders/anthropic/index.js
@@ -0,0 +1,144 @@
+const { v4 } = require("uuid");
+const { chatPrompt } = require("../../chats");
+
+class AnthropicLLM {
+ constructor(embedder = null) {
+ if (!process.env.ANTHROPIC_API_KEY)
+ throw new Error("No Anthropic API key was set.");
+
+ // Docs: https://www.npmjs.com/package/@anthropic-ai/sdk
+ const AnthropicAI = require("@anthropic-ai/sdk");
+ const anthropic = new AnthropicAI({
+ apiKey: process.env.ANTHROPIC_API_KEY,
+ });
+ this.anthropic = anthropic;
+
+ if (!embedder)
+ throw new Error(
+ "INVALID ANTHROPIC SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Anthropic as your LLM."
+ );
+ this.embedder = embedder;
+ this.answerKey = v4().split("-")[0];
+ }
+
+ isValidChatModel(modelName = "") {
+ const validModels = ["claude-2"];
+ return validModels.includes(modelName);
+ }
+
+ // Moderation can be done with Anthropic, but it's not really "exact" so we skip it
+ // https://docs.anthropic.com/claude/docs/content-moderation
+ async isSafe(_input = "") {
+ // Not implemented so must be stubbed
+ return { safe: true, reasons: [] };
+ }
+
+ constructPrompt({
+ systemPrompt = "",
+ contextTexts = [],
+ chatHistory = [],
+ userPrompt = "",
+ }) {
+ return `\n\nHuman: Please read question supplied within the <question> tags. Using all information generate an answer to the question and output it within <${
+ this.answerKey
+ }> tags. Previous conversations can be used within the <history> tags and can be used to influence the output. Content between the <system> tag is additional information and instruction that will impact how answers are formatted or responded to. Additional contextual information retrieved to help answer the users specific query is available to use for answering and can be found between <context> tags. When no <context> tags are present use the knowledge available and in the conversation to answer. When one or more <context> tags are available you will use those to help answer the question or augment pre-existing knowledge. You should never say "Based on the provided context" or other phrasing that is not related to the user question.
+ <system>${systemPrompt}</system>
+ ${contextTexts
+ .map((text, i) => {
+ return `<context>${text}</context> \n`;
+ })
+ .join("")}
+ <history>${chatHistory.map((history) => {
+ switch (history.role) {
+ case "assistant":
+ return `\n\nAssistant: ${history.content}`;
+ case "user":
+ return `\n\nHuman: ${history.content}`;
+ default:
+ return "\n";
+ }
+ })}</history>
+ <question>${userPrompt}</question>
+ \n\nAssistant:`;
+ }
+
+ // This is the interface used when no embeddings are present in the workspace
+ // This is just having a conversation with the LLM as one would normally.
+ async sendChat(chatHistory = [], prompt, workspace = {}) {
+ const model = process.env.ANTHROPIC_MODEL_PREF || "claude-2";
+ if (!this.isValidChatModel(model))
+ throw new Error(
+ `Anthropic chat: ${model} is not valid for chat completion!`
+ );
+
+ const { content, error } = await this.anthropic.completions
+ .create({
+ model: "claude-2",
+ max_tokens_to_sample: 300,
+ prompt: this.constructPrompt({
+ systemPrompt: chatPrompt(workspace),
+ userPrompt: prompt,
+ chatHistory,
+ }),
+ })
+ .then((res) => {
+ const { completion } = res;
+ const re = new RegExp(
+ "(?:<" + this.answerKey + ">)([\\s\\S]*)(?:" + this.answerKey + ">)"
+ );
+ const response = completion.match(re)?.[1]?.trim();
+ if (!response)
+ throw new Error("Anthropic: No response could be parsed.");
+ return { content: response, error: null };
+ })
+ .catch((e) => {
+ return { content: null, error: e.message };
+ });
+
+ if (error) throw new Error(error);
+ return content;
+ }
+
+ async getChatCompletion(prompt = "", _opts = {}) {
+ const model = process.env.ANTHROPIC_MODEL_PREF || "claude-2";
+ if (!this.isValidChatModel(model))
+ throw new Error(
+ `Anthropic chat: ${model} is not valid for chat completion!`
+ );
+
+ const { content, error } = await this.anthropic.completions
+ .create({
+ model: "claude-2",
+ max_tokens_to_sample: 300,
+ prompt,
+ })
+ .then((res) => {
+ const { completion } = res;
+ const re = new RegExp(
+ "(?:<" + this.answerKey + ">)([\\s\\S]*)(?:" + this.answerKey + ">)"
+ );
+ const response = completion.match(re)?.[1]?.trim();
+ if (!response)
+ throw new Error("Anthropic: No response could be parsed.");
+ return { content: response, error: null };
+ })
+ .catch((e) => {
+ return { content: null, error: e.message };
+ });
+
+ if (error) throw new Error(error);
+ return content;
+ }
+
+ // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+ async embedTextInput(textInput) {
+ return await this.embedder.embedTextInput(textInput);
+ }
+ async embedChunks(textChunks = []) {
+ return await this.embedder.embedChunks(textChunks);
+ }
+}
+
+module.exports = {
+ AnthropicLLM,
+};
diff --git a/server/utils/AiProviders/azureOpenAi/index.js b/server/utils/AiProviders/azureOpenAi/index.js
index abb459fb..6c450c5d 100644
--- a/server/utils/AiProviders/azureOpenAi/index.js
+++ b/server/utils/AiProviders/azureOpenAi/index.js
@@ -1,17 +1,18 @@
-const { toChunks } = require("../../helpers");
+const { AzureOpenAiEmbedder } = require("../../EmbeddingEngines/azureOpenAi");
-class AzureOpenAi {
+class AzureOpenAiLLM extends AzureOpenAiEmbedder {
constructor() {
+ super();
const { OpenAIClient, AzureKeyCredential } = require("@azure/openai");
- const openai = new OpenAIClient(
+ if (!process.env.AZURE_OPENAI_ENDPOINT)
+ throw new Error("No Azure API endpoint was set.");
+ if (!process.env.AZURE_OPENAI_KEY)
+ throw new Error("No Azure API key was set.");
+
+ this.openai = new OpenAIClient(
process.env.AZURE_OPENAI_ENDPOINT,
new AzureKeyCredential(process.env.AZURE_OPENAI_KEY)
);
- this.openai = openai;
-
- // The maximum amount of "inputs" that OpenAI API can process in a single call.
- // https://learn.microsoft.com/en-us/azure/ai-services/openai/faq#i-am-trying-to-use-embeddings-and-received-the-error--invalidrequesterror--too-many-inputs--the-max-number-of-inputs-is-1---how-do-i-fix-this-:~:text=consisting%20of%20up%20to%2016%20inputs%20per%20API%20request
- this.embeddingChunkLimit = 16;
}
isValidChatModel(_modelName = "") {
@@ -21,6 +22,25 @@ class AzureOpenAi {
return true;
}
+ constructPrompt({
+ systemPrompt = "",
+ contextTexts = [],
+ chatHistory = [],
+ userPrompt = "",
+ }) {
+ const prompt = {
+ role: "system",
+ content: `${systemPrompt}
+ Context:
+ ${contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")}`,
+ };
+ return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+ }
+
async isSafe(_input = "") {
// Not implemented by Azure OpenAI so must be stubbed
return { safe: true, reasons: [] };
@@ -75,70 +95,8 @@ class AzureOpenAi {
if (!data.hasOwnProperty("choices")) return null;
return data.choices[0].message.content;
}
-
- async embedTextInput(textInput) {
- const result = await this.embedChunks(textInput);
- return result?.[0] || [];
- }
-
- async embedChunks(textChunks = []) {
- const textEmbeddingModel =
- process.env.EMBEDDING_MODEL_PREF || "text-embedding-ada-002";
- if (!textEmbeddingModel)
- throw new Error(
- "No EMBEDDING_MODEL_PREF ENV defined. This must the name of a deployment on your Azure account for an embedding model."
- );
-
- // Because there is a limit on how many chunks can be sent at once to Azure OpenAI
- // we concurrently execute each max batch of text chunks possible.
- // Refer to constructor embeddingChunkLimit for more info.
- const embeddingRequests = [];
- for (const chunk of toChunks(textChunks, this.embeddingChunkLimit)) {
- embeddingRequests.push(
- new Promise((resolve) => {
- this.openai
- .getEmbeddings(textEmbeddingModel, chunk)
- .then((res) => {
- resolve({ data: res.data, error: null });
- })
- .catch((e) => {
- resolve({ data: [], error: e?.error });
- });
- })
- );
- }
-
- const { data = [], error = null } = await Promise.all(
- embeddingRequests
- ).then((results) => {
- // If any errors were returned from Azure abort the entire sequence because the embeddings
- // will be incomplete.
- const errors = results
- .filter((res) => !!res.error)
- .map((res) => res.error)
- .flat();
- if (errors.length > 0) {
- return {
- data: [],
- error: `(${errors.length}) Embedding Errors! ${errors
- .map((error) => `[${error.type}]: ${error.message}`)
- .join(", ")}`,
- };
- }
- return {
- data: results.map((res) => res?.data || []).flat(),
- error: null,
- };
- });
-
- if (!!error) throw new Error(`Azure OpenAI Failed to embed: ${error}`);
- return data.length > 0 &&
- data.every((embd) => embd.hasOwnProperty("embedding"))
- ? data.map((embd) => embd.embedding)
- : null;
- }
}
module.exports = {
- AzureOpenAi,
+ AzureOpenAiLLM,
};
diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js
index dc7e47c6..d4d54bd6 100644
--- a/server/utils/AiProviders/openAi/index.js
+++ b/server/utils/AiProviders/openAi/index.js
@@ -1,16 +1,15 @@
-const { toChunks } = require("../../helpers");
+const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
-class OpenAi {
+class OpenAiLLM extends OpenAiEmbedder {
constructor() {
+ super();
const { Configuration, OpenAIApi } = require("openai");
+ if (!process.env.OPEN_AI_KEY) throw new Error("No OpenAI API key was set.");
+
const config = new Configuration({
apiKey: process.env.OPEN_AI_KEY,
});
- const openai = new OpenAIApi(config);
- this.openai = openai;
-
- // Arbitrary limit to ensure we stay within reasonable POST request size.
- this.embeddingChunkLimit = 1_000;
+ this.openai = new OpenAIApi(config);
}
isValidChatModel(modelName = "") {
@@ -18,6 +17,25 @@ class OpenAi {
return validModels.includes(modelName);
}
+ constructPrompt({
+ systemPrompt = "",
+ contextTexts = [],
+ chatHistory = [],
+ userPrompt = "",
+ }) {
+ const prompt = {
+ role: "system",
+ content: `${systemPrompt}
+ Context:
+ ${contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")}`,
+ };
+ return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+ }
+
async isSafe(input = "") {
const { flagged = false, categories = {} } = await this.openai
.createModeration({ input })
@@ -97,66 +115,8 @@ class OpenAi {
if (!data.hasOwnProperty("choices")) return null;
return data.choices[0].message.content;
}
-
- async embedTextInput(textInput) {
- const result = await this.embedChunks(textInput);
- return result?.[0] || [];
- }
-
- async embedChunks(textChunks = []) {
- // Because there is a hard POST limit on how many chunks can be sent at once to OpenAI (~8mb)
- // we concurrently execute each max batch of text chunks possible.
- // Refer to constructor embeddingChunkLimit for more info.
- const embeddingRequests = [];
- for (const chunk of toChunks(textChunks, this.embeddingChunkLimit)) {
- embeddingRequests.push(
- new Promise((resolve) => {
- this.openai
- .createEmbedding({
- model: "text-embedding-ada-002",
- input: chunk,
- })
- .then((res) => {
- resolve({ data: res.data?.data, error: null });
- })
- .catch((e) => {
- resolve({ data: [], error: e?.error });
- });
- })
- );
- }
-
- const { data = [], error = null } = await Promise.all(
- embeddingRequests
- ).then((results) => {
- // If any errors were returned from OpenAI abort the entire sequence because the embeddings
- // will be incomplete.
- const errors = results
- .filter((res) => !!res.error)
- .map((res) => res.error)
- .flat();
- if (errors.length > 0) {
- return {
- data: [],
- error: `(${errors.length}) Embedding Errors! ${errors
- .map((error) => `[${error.type}]: ${error.message}`)
- .join(", ")}`,
- };
- }
- return {
- data: results.map((res) => res?.data || []).flat(),
- error: null,
- };
- });
-
- if (!!error) throw new Error(`OpenAI Failed to embed: ${error}`);
- return data.length > 0 &&
- data.every((embd) => embd.hasOwnProperty("embedding"))
- ? data.map((embd) => embd.embedding)
- : null;
- }
}
module.exports = {
- OpenAi,
+ OpenAiLLM,
};
diff --git a/server/utils/EmbeddingEngines/azureOpenAi/index.js b/server/utils/EmbeddingEngines/azureOpenAi/index.js
new file mode 100644
index 00000000..554538fa
--- /dev/null
+++ b/server/utils/EmbeddingEngines/azureOpenAi/index.js
@@ -0,0 +1,87 @@
+const { toChunks } = require("../../helpers");
+
+class AzureOpenAiEmbedder {
+ constructor() {
+ const { OpenAIClient, AzureKeyCredential } = require("@azure/openai");
+ if (!process.env.AZURE_OPENAI_ENDPOINT)
+ throw new Error("No Azure API endpoint was set.");
+ if (!process.env.AZURE_OPENAI_KEY)
+ throw new Error("No Azure API key was set.");
+
+ const openai = new OpenAIClient(
+ process.env.AZURE_OPENAI_ENDPOINT,
+ new AzureKeyCredential(process.env.AZURE_OPENAI_KEY)
+ );
+ this.openai = openai;
+
+ // The maximum amount of "inputs" that OpenAI API can process in a single call.
+ // https://learn.microsoft.com/en-us/azure/ai-services/openai/faq#i-am-trying-to-use-embeddings-and-received-the-error--invalidrequesterror--too-many-inputs--the-max-number-of-inputs-is-1---how-do-i-fix-this-:~:text=consisting%20of%20up%20to%2016%20inputs%20per%20API%20request
+ this.embeddingChunkLimit = 16;
+ }
+
+ async embedTextInput(textInput) {
+ const result = await this.embedChunks(textInput);
+ return result?.[0] || [];
+ }
+
+ async embedChunks(textChunks = []) {
+ const textEmbeddingModel =
+ process.env.EMBEDDING_MODEL_PREF || "text-embedding-ada-002";
+ if (!textEmbeddingModel)
+ throw new Error(
+ "No EMBEDDING_MODEL_PREF ENV defined. This must the name of a deployment on your Azure account for an embedding model."
+ );
+
+ // Because there is a limit on how many chunks can be sent at once to Azure OpenAI
+ // we concurrently execute each max batch of text chunks possible.
+ // Refer to constructor embeddingChunkLimit for more info.
+ const embeddingRequests = [];
+ for (const chunk of toChunks(textChunks, this.embeddingChunkLimit)) {
+ embeddingRequests.push(
+ new Promise((resolve) => {
+ this.openai
+ .getEmbeddings(textEmbeddingModel, chunk)
+ .then((res) => {
+ resolve({ data: res.data, error: null });
+ })
+ .catch((e) => {
+ resolve({ data: [], error: e?.error });
+ });
+ })
+ );
+ }
+
+ const { data = [], error = null } = await Promise.all(
+ embeddingRequests
+ ).then((results) => {
+ // If any errors were returned from Azure abort the entire sequence because the embeddings
+ // will be incomplete.
+ const errors = results
+ .filter((res) => !!res.error)
+ .map((res) => res.error)
+ .flat();
+ if (errors.length > 0) {
+ return {
+ data: [],
+ error: `(${errors.length}) Embedding Errors! ${errors
+ .map((error) => `[${error.type}]: ${error.message}`)
+ .join(", ")}`,
+ };
+ }
+ return {
+ data: results.map((res) => res?.data || []).flat(),
+ error: null,
+ };
+ });
+
+ if (!!error) throw new Error(`Azure OpenAI Failed to embed: ${error}`);
+ return data.length > 0 &&
+ data.every((embd) => embd.hasOwnProperty("embedding"))
+ ? data.map((embd) => embd.embedding)
+ : null;
+ }
+}
+
+module.exports = {
+ AzureOpenAiEmbedder,
+};
diff --git a/server/utils/EmbeddingEngines/openAi/index.js b/server/utils/EmbeddingEngines/openAi/index.js
new file mode 100644
index 00000000..2c4c8c24
--- /dev/null
+++ b/server/utils/EmbeddingEngines/openAi/index.js
@@ -0,0 +1,78 @@
+const { toChunks } = require("../../helpers");
+
+class OpenAiEmbedder {
+ constructor() {
+ const { Configuration, OpenAIApi } = require("openai");
+ if (!process.env.OPEN_AI_KEY) throw new Error("No OpenAI API key was set.");
+ const config = new Configuration({
+ apiKey: process.env.OPEN_AI_KEY,
+ });
+ const openai = new OpenAIApi(config);
+ this.openai = openai;
+
+ // Arbitrary limit to ensure we stay within reasonable POST request size.
+ this.embeddingChunkLimit = 1_000;
+ }
+
+ async embedTextInput(textInput) {
+ const result = await this.embedChunks(textInput);
+ return result?.[0] || [];
+ }
+
+ async embedChunks(textChunks = []) {
+ // Because there is a hard POST limit on how many chunks can be sent at once to OpenAI (~8mb)
+ // we concurrently execute each max batch of text chunks possible.
+ // Refer to constructor embeddingChunkLimit for more info.
+ const embeddingRequests = [];
+ for (const chunk of toChunks(textChunks, this.embeddingChunkLimit)) {
+ embeddingRequests.push(
+ new Promise((resolve) => {
+ this.openai
+ .createEmbedding({
+ model: "text-embedding-ada-002",
+ input: chunk,
+ })
+ .then((res) => {
+ resolve({ data: res.data?.data, error: null });
+ })
+ .catch((e) => {
+ resolve({ data: [], error: e?.error });
+ });
+ })
+ );
+ }
+
+ const { data = [], error = null } = await Promise.all(
+ embeddingRequests
+ ).then((results) => {
+ // If any errors were returned from OpenAI abort the entire sequence because the embeddings
+ // will be incomplete.
+ const errors = results
+ .filter((res) => !!res.error)
+ .map((res) => res.error)
+ .flat();
+ if (errors.length > 0) {
+ return {
+ data: [],
+ error: `(${errors.length}) Embedding Errors! ${errors
+ .map((error) => `[${error.type}]: ${error.message}`)
+ .join(", ")}`,
+ };
+ }
+ return {
+ data: results.map((res) => res?.data || []).flat(),
+ error: null,
+ };
+ });
+
+ if (!!error) throw new Error(`OpenAI Failed to embed: ${error}`);
+ return data.length > 0 &&
+ data.every((embd) => embd.hasOwnProperty("embedding"))
+ ? data.map((embd) => embd.embedding)
+ : null;
+ }
+}
+
+module.exports = {
+ OpenAiEmbedder,
+};
diff --git a/server/utils/chats/index.js b/server/utils/chats/index.js
index a3a96bc2..3873c3ac 100644
--- a/server/utils/chats/index.js
+++ b/server/utils/chats/index.js
@@ -1,10 +1,8 @@
const { v4: uuidv4 } = require("uuid");
-const { OpenAi } = require("../AiProviders/openAi");
const { WorkspaceChats } = require("../../models/workspaceChats");
const { resetMemory } = require("./commands/reset");
const moment = require("moment");
const { getVectorDbClass, getLLMProvider } = require("../helpers");
-const { AzureOpenAi } = require("../AiProviders/azureOpenAi");
function convertToChatHistory(history = []) {
const formattedHistory = [];
@@ -67,14 +65,14 @@ async function chatWithWorkspace(
user = null
) {
const uuid = uuidv4();
- const LLMConnector = getLLMProvider();
- const VectorDb = getVectorDbClass();
const command = grepCommand(message);
if (!!command && Object.keys(VALID_COMMANDS).includes(command)) {
return await VALID_COMMANDS[command](workspace, message, uuid, user);
}
+ const LLMConnector = getLLMProvider();
+ const VectorDb = getVectorDbClass();
const { safe, reasons = [] } = await LLMConnector.isSafe(message);
if (!safe) {
return {
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index b077606a..699ac80f 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -25,16 +25,36 @@ function getLLMProvider() {
const vectorSelection = process.env.LLM_PROVIDER || "openai";
switch (vectorSelection) {
case "openai":
- const { OpenAi } = require("../AiProviders/openAi");
- return new OpenAi();
+ const { OpenAiLLM } = require("../AiProviders/openAi");
+ return new OpenAiLLM();
case "azure":
- const { AzureOpenAi } = require("../AiProviders/azureOpenAi");
- return new AzureOpenAi();
+ const { AzureOpenAiLLM } = require("../AiProviders/azureOpenAi");
+ return new AzureOpenAiLLM();
+ case "anthropic":
+ const { AnthropicLLM } = require("../AiProviders/anthropic");
+ const embedder = getEmbeddingEngineSelection();
+ return new AnthropicLLM(embedder);
default:
throw new Error("ENV: No LLM_PROVIDER value found in environment!");
}
}
+function getEmbeddingEngineSelection() {
+ const engineSelection = process.env.EMBEDDING_ENGINE;
+ switch (engineSelection) {
+ case "openai":
+ const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
+ return new OpenAiEmbedder();
+ case "azure":
+ const {
+ AzureOpenAiEmbedder,
+ } = require("../../EmbeddingEngines/azureOpenAi");
+ return new AzureOpenAiEmbedder();
+ default:
+ return null;
+ }
+}
+
function toChunks(arr, size) {
return Array.from({ length: Math.ceil(arr.length / size) }, (_v, i) =>
arr.slice(i * size, i * size + size)
@@ -42,6 +62,7 @@ function toChunks(arr, size) {
}
module.exports = {
+ getEmbeddingEngineSelection,
getVectorDbClass,
getLLMProvider,
toChunks,
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 88b07989..3e6d6429 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -30,6 +30,21 @@ const KEY_MAPPING = {
checks: [isNotEmpty],
},
+ // Anthropic Settings
+ AnthropicApiKey: {
+ envKey: "ANTHROPIC_API_KEY",
+ checks: [isNotEmpty, validAnthropicApiKey],
+ },
+ AnthropicModelPref: {
+ envKey: "ANTHROPIC_MODEL_PREF",
+ checks: [isNotEmpty, validAnthropicModel],
+ },
+
+ EmbeddingEngine: {
+ envKey: "EMBEDDING_ENGINE",
+ checks: [supportedEmbeddingModel],
+ },
+
// Vector Database Selection Settings
VectorDB: {
envKey: "VECTOR_DB",
@@ -113,8 +128,14 @@ function validOpenAIKey(input = "") {
return input.startsWith("sk-") ? null : "OpenAI Key must start with sk-";
}
+function validAnthropicApiKey(input = "") {
+ return input.startsWith("sk-ant-")
+ ? null
+ : "Anthropic Key must start with sk-ant-";
+}
+
function supportedLLM(input = "") {
- return ["openai", "azure"].includes(input);
+ return ["openai", "azure", "anthropic"].includes(input);
}
function validOpenAIModel(input = "") {
@@ -124,6 +145,20 @@ function validOpenAIModel(input = "") {
: `Invalid Model type. Must be one of ${validModels.join(", ")}.`;
}
+function validAnthropicModel(input = "") {
+ const validModels = ["claude-2"];
+ return validModels.includes(input)
+ ? null
+ : `Invalid Model type. Must be one of ${validModels.join(", ")}.`;
+}
+
+function supportedEmbeddingModel(input = "") {
+ const supported = ["openai", "azure"];
+ return supported.includes(input)
+ ? null
+ : `Invalid Embedding model type. Must be one of ${supported.join(", ")}.`;
+}
+
function supportedVectorDB(input = "") {
const supported = ["chroma", "pinecone", "lancedb", "weaviate", "qdrant"];
return supported.includes(input)
diff --git a/server/utils/vectorDbProviders/chroma/index.js b/server/utils/vectorDbProviders/chroma/index.js
index 8e33b35e..fdc4cbe4 100644
--- a/server/utils/vectorDbProviders/chroma/index.js
+++ b/server/utils/vectorDbProviders/chroma/index.js
@@ -273,17 +273,11 @@ const Chroma = {
namespace,
queryVector
);
- const prompt = {
- role: "system",
- content: `${chatPrompt(workspace)}
- Context:
- ${contextTexts
- .map((text, i) => {
- return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
- })
- .join("")}`,
- };
- const memory = [prompt, { role: "user", content: input }];
+ const memory = LLMConnector.constructPrompt({
+ systemPrompt: chatPrompt(workspace),
+ contextTexts: contextTexts,
+ userPrompt: input,
+ });
const responseText = await LLMConnector.getChatCompletion(memory, {
temperature: workspace?.openAiTemp ?? 0.7,
});
@@ -328,17 +322,12 @@ const Chroma = {
namespace,
queryVector
);
- const prompt = {
- role: "system",
- content: `${chatPrompt(workspace)}
- Context:
- ${contextTexts
- .map((text, i) => {
- return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
- })
- .join("")}`,
- };
- const memory = [prompt, ...chatHistory, { role: "user", content: input }];
+ const memory = LLMConnector.constructPrompt({
+ systemPrompt: chatPrompt(workspace),
+ contextTexts: contextTexts,
+ userPrompt: input,
+ chatHistory,
+ });
const responseText = await LLMConnector.getChatCompletion(memory, {
temperature: workspace?.openAiTemp ?? 0.7,
});
diff --git a/server/utils/vectorDbProviders/lance/index.js b/server/utils/vectorDbProviders/lance/index.js
index 9d446058..bb150958 100644
--- a/server/utils/vectorDbProviders/lance/index.js
+++ b/server/utils/vectorDbProviders/lance/index.js
@@ -246,17 +246,11 @@ const LanceDb = {
namespace,
queryVector
);
- const prompt = {
- role: "system",
- content: `${chatPrompt(workspace)}
- Context:
- ${contextTexts
- .map((text, i) => {
- return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
- })
- .join("")}`,
- };
- const memory = [prompt, { role: "user", content: input }];
+ const memory = LLMConnector.constructPrompt({
+ systemPrompt: chatPrompt(workspace),
+ contextTexts: contextTexts,
+ userPrompt: input,
+ });
const responseText = await LLMConnector.getChatCompletion(memory, {
temperature: workspace?.openAiTemp ?? 0.7,
});
@@ -296,17 +290,12 @@ const LanceDb = {
namespace,
queryVector
);
- const prompt = {
- role: "system",
- content: `${chatPrompt(workspace)}
- Context:
- ${contextTexts
- .map((text, i) => {
- return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
- })
- .join("")}`,
- };
- const memory = [prompt, ...chatHistory, { role: "user", content: input }];
+ const memory = LLMConnector.constructPrompt({
+ systemPrompt: chatPrompt(workspace),
+ contextTexts: contextTexts,
+ userPrompt: input,
+ chatHistory,
+ });
const responseText = await LLMConnector.getChatCompletion(memory, {
temperature: workspace?.openAiTemp ?? 0.7,
});
diff --git a/server/utils/vectorDbProviders/pinecone/index.js b/server/utils/vectorDbProviders/pinecone/index.js
index 79b0d40b..fc7f4d31 100644
--- a/server/utils/vectorDbProviders/pinecone/index.js
+++ b/server/utils/vectorDbProviders/pinecone/index.js
@@ -242,18 +242,11 @@ const Pinecone = {
namespace,
queryVector
);
- const prompt = {
- role: "system",
- content: `${chatPrompt(workspace)}
- Context:
- ${contextTexts
- .map((text, i) => {
- return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
- })
- .join("")}`,
- };
-
- const memory = [prompt, { role: "user", content: input }];
+ const memory = LLMConnector.constructPrompt({
+ systemPrompt: chatPrompt(workspace),
+ contextTexts: contextTexts,
+ userPrompt: input,
+ });
const responseText = await LLMConnector.getChatCompletion(memory, {
temperature: workspace?.openAiTemp ?? 0.7,
});
@@ -290,18 +283,12 @@ const Pinecone = {
namespace,
queryVector
);
- const prompt = {
- role: "system",
- content: `${chatPrompt(workspace)}
- Context:
- ${contextTexts
- .map((text, i) => {
- return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
- })
- .join("")}`,
- };
-
- const memory = [prompt, ...chatHistory, { role: "user", content: input }];
+ const memory = LLMConnector.constructPrompt({
+ systemPrompt: chatPrompt(workspace),
+ contextTexts: contextTexts,
+ userPrompt: input,
+ chatHistory,
+ });
const responseText = await LLMConnector.getChatCompletion(memory, {
temperature: workspace?.openAiTemp ?? 0.7,
});
diff --git a/server/utils/vectorDbProviders/qdrant/index.js b/server/utils/vectorDbProviders/qdrant/index.js
index 2ee8e6ed..9925c6e4 100644
--- a/server/utils/vectorDbProviders/qdrant/index.js
+++ b/server/utils/vectorDbProviders/qdrant/index.js
@@ -282,17 +282,11 @@ const QDrant = {
namespace,
queryVector
);
- const prompt = {
- role: "system",
- content: `${chatPrompt(workspace)}
- Context:
- ${contextTexts
- .map((text, i) => {
- return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
- })
- .join("")}`,
- };
- const memory = [prompt, { role: "user", content: input }];
+ const memory = LLMConnector.constructPrompt({
+ systemPrompt: chatPrompt(workspace),
+ contextTexts: contextTexts,
+ userPrompt: input,
+ });
const responseText = await LLMConnector.getChatCompletion(memory, {
temperature: workspace?.openAiTemp ?? 0.7,
});
@@ -332,17 +326,12 @@ const QDrant = {
namespace,
queryVector
);
- const prompt = {
- role: "system",
- content: `${chatPrompt(workspace)}
- Context:
- ${contextTexts
- .map((text, i) => {
- return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
- })
- .join("")}`,
- };
- const memory = [prompt, ...chatHistory, { role: "user", content: input }];
+ const memory = LLMConnector.constructPrompt({
+ systemPrompt: chatPrompt(workspace),
+ contextTexts: contextTexts,
+ userPrompt: input,
+ chatHistory,
+ });
const responseText = await LLMConnector.getChatCompletion(memory, {
temperature: workspace?.openAiTemp ?? 0.7,
});
diff --git a/server/utils/vectorDbProviders/weaviate/index.js b/server/utils/vectorDbProviders/weaviate/index.js
index 8543db7d..1a43e3c5 100644
--- a/server/utils/vectorDbProviders/weaviate/index.js
+++ b/server/utils/vectorDbProviders/weaviate/index.js
@@ -353,18 +353,11 @@ const Weaviate = {
namespace,
queryVector
);
-
- const prompt = {
- role: "system",
- content: `${chatPrompt(workspace)}
- Context:
- ${contextTexts
- .map((text, i) => {
- return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
- })
- .join("")}`,
- };
- const memory = [prompt, { role: "user", content: input }];
+ const memory = LLMConnector.constructPrompt({
+ systemPrompt: chatPrompt(workspace),
+ contextTexts: contextTexts,
+ userPrompt: input,
+ });
const responseText = await LLMConnector.getChatCompletion(memory, {
temperature: workspace?.openAiTemp ?? 0.7,
});
@@ -404,17 +397,12 @@ const Weaviate = {
namespace,
queryVector
);
- const prompt = {
- role: "system",
- content: `${chatPrompt(workspace)}
- Context:
- ${contextTexts
- .map((text, i) => {
- return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
- })
- .join("")}`,
- };
- const memory = [prompt, ...chatHistory, { role: "user", content: input }];
+ const memory = LLMConnector.constructPrompt({
+ systemPrompt: chatPrompt(workspace),
+ contextTexts: contextTexts,
+ userPrompt: input,
+ chatHistory,
+ });
const responseText = await LLMConnector.getChatCompletion(memory, {
temperature: workspace?.openAiTemp ?? 0.7,
});
diff --git a/server/yarn.lock b/server/yarn.lock
index 6eb89c2d..01479024 100644
--- a/server/yarn.lock
+++ b/server/yarn.lock
@@ -10,6 +10,21 @@
"@fortaine/fetch-event-source" "^3.0.6"
cross-fetch "^3.1.5"
+"@anthropic-ai/sdk@^0.8.1":
+ version "0.8.1"
+ resolved "https://registry.yarnpkg.com/@anthropic-ai/sdk/-/sdk-0.8.1.tgz#7c7c6cb262abe3e6d0bb8bd1179b4589edd7a6ad"
+ integrity sha512-59etePenCizVx1O8Qhi1T1ruE04ISfNzCnyhZNcsss1QljsLmYS83jttarMNEvGYcsUF7rwxw2lzcC3Zbxao7g==
+ dependencies:
+ "@types/node" "^18.11.18"
+ "@types/node-fetch" "^2.6.4"
+ abort-controller "^3.0.0"
+ agentkeepalive "^4.2.1"
+ digest-fetch "^1.3.0"
+ form-data-encoder "1.7.2"
+ formdata-node "^4.3.2"
+ node-fetch "^2.6.7"
+ web-streams-polyfill "^3.2.1"
+
"@apache-arrow/ts@^12.0.0":
version "12.0.1"
resolved "https://registry.yarnpkg.com/@apache-arrow/ts/-/ts-12.0.1.tgz#a802a28f450886e77b32c516c370c24941767455"
@@ -229,6 +244,14 @@
resolved "https://registry.yarnpkg.com/@types/command-line-usage/-/command-line-usage-5.0.2.tgz#ba5e3f6ae5a2009d466679cc431b50635bf1a064"
integrity sha512-n7RlEEJ+4x4TS7ZQddTmNSxP+zziEG0TNsMfiRIxcIVXt71ENJ9ojeXmGO3wPoTdn7pJcU2xc3CJYMktNT6DPg==
+"@types/node-fetch@^2.6.4":
+ version "2.6.7"
+ resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.6.7.tgz#a1abe2ce24228b58ad97f99480fdcf9bbc6ab16d"
+ integrity sha512-lX17GZVpJ/fuCjguZ5b3TjEbSENxmEk1B2z02yoXSK9WMEWRivhdSY73wWMn6bpcCDAOh6qAdktpKHIlkDk2lg==
+ dependencies:
+ "@types/node" "*"
+ form-data "^4.0.0"
+
"@types/node@*":
version "20.4.2"
resolved "https://registry.yarnpkg.com/@types/node/-/node-20.4.2.tgz#129cc9ae69f93824f92fac653eebfb4812ab4af9"
@@ -239,6 +262,13 @@
resolved "https://registry.yarnpkg.com/@types/node/-/node-18.14.5.tgz#4a13a6445862159303fc38586598a9396fc408b3"
integrity sha512-CRT4tMK/DHYhw1fcCEBwME9CSaZNclxfzVMe7GsO6ULSwsttbj70wSiX6rZdIjGblu93sTJxLdhNIT85KKI7Qw==
+"@types/node@^18.11.18":
+ version "18.18.7"
+ resolved "https://registry.yarnpkg.com/@types/node/-/node-18.18.7.tgz#bb3a7068dc4ba421b6968f2a259298b3a4e129e8"
+ integrity sha512-bw+lEsxis6eqJYW8Ql6+yTqkE6RuFtsQPSe5JxXbqYRFQEER5aJA9a5UH9igqDWm3X4iLHIKOHlnAXLM4mi7uQ==
+ dependencies:
+ undici-types "~5.26.4"
+
"@types/pad-left@2.1.1":
version "2.1.1"
resolved "https://registry.yarnpkg.com/@types/pad-left/-/pad-left-2.1.1.tgz#17d906fc75804e1cc722da73623f1d978f16a137"
@@ -261,6 +291,13 @@ abbrev@1:
resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8"
integrity sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==
+abort-controller@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.yarnpkg.com/abort-controller/-/abort-controller-3.0.0.tgz#eaf54d53b62bae4138e809ca225c8439a6efb392"
+ integrity sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==
+ dependencies:
+ event-target-shim "^5.0.0"
+
accepts@~1.3.4, accepts@~1.3.8:
version "1.3.8"
resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.8.tgz#0bf0be125b67014adcb0b0921e62db7bffe16b2e"
@@ -290,6 +327,13 @@ agentkeepalive@^4.1.3:
depd "^2.0.0"
humanize-ms "^1.2.1"
+agentkeepalive@^4.2.1:
+ version "4.5.0"
+ resolved "https://registry.yarnpkg.com/agentkeepalive/-/agentkeepalive-4.5.0.tgz#2673ad1389b3c418c5a20c5d7364f93ca04be923"
+ integrity sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==
+ dependencies:
+ humanize-ms "^1.2.1"
+
aggregate-error@^3.0.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-3.1.0.tgz#92670ff50f5359bdb7a3e0d40d0ec30c5737687a"
@@ -444,6 +488,11 @@ balanced-match@^1.0.0:
resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee"
integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
+base-64@^0.1.0:
+ version "0.1.0"
+ resolved "https://registry.yarnpkg.com/base-64/-/base-64-0.1.0.tgz#780a99c84e7d600260361511c4877613bf24f6bb"
+ integrity sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA==
+
base64-js@^1.3.0, base64-js@^1.3.1, base64-js@^1.5.1:
version "1.5.1"
resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a"
@@ -620,6 +669,11 @@ chalk@^2.4.2:
escape-string-regexp "^1.0.5"
supports-color "^5.3.0"
+charenc@0.0.2:
+ version "0.0.2"
+ resolved "https://registry.yarnpkg.com/charenc/-/charenc-0.0.2.tgz#c0a1d2f3a7092e03774bfa83f14c0fc5790a8667"
+ integrity sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA==
+
check-disk-space@^3.4.0:
version "3.4.0"
resolved "https://registry.yarnpkg.com/check-disk-space/-/check-disk-space-3.4.0.tgz#eb8e69eee7a378fd12e35281b8123a8b4c4a8ff7"
@@ -796,6 +850,11 @@ cross-fetch@^3.1.5:
dependencies:
node-fetch "^2.6.12"
+crypt@0.0.2:
+ version "0.0.2"
+ resolved "https://registry.yarnpkg.com/crypt/-/crypt-0.0.2.tgz#88d7ff7ec0dfb86f713dc87bbb42d044d3e6c41b"
+ integrity sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow==
+
debug@2.6.9:
version "2.6.9"
resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f"
@@ -857,6 +916,14 @@ detect-libc@^2.0.0:
resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-2.0.2.tgz#8ccf2ba9315350e1241b88d0ac3b0e1fbd99605d"
integrity sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==
+digest-fetch@^1.3.0:
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/digest-fetch/-/digest-fetch-1.3.0.tgz#898e69264d00012a23cf26e8a3e40320143fc661"
+ integrity sha512-CGJuv6iKNM7QyZlM2T3sPAdZWd/p9zQiRNS9G+9COUCwzWFTs0Xp8NF5iePx7wtvhDykReiRRrSeNb4oMmB8lA==
+ dependencies:
+ base-64 "^0.1.0"
+ md5 "^2.3.0"
+
dotenv@^16.0.3:
version "16.3.1"
resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.3.1.tgz#369034de7d7e5b120972693352a3bf112172cc3e"
@@ -928,6 +995,11 @@ etag@~1.8.1:
resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887"
integrity sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==
+event-target-shim@^5.0.0:
+ version "5.0.1"
+ resolved "https://registry.yarnpkg.com/event-target-shim/-/event-target-shim-5.0.1.tgz#5d4d3ebdf9583d63a5333ce2deb7480ab2b05789"
+ integrity sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==
+
eventemitter3@^4.0.4:
version "4.0.7"
resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.7.tgz#2de9b68f6528d5644ef5c59526a1b4a07306169f"
@@ -1050,6 +1122,11 @@ follow-redirects@^1.14.8, follow-redirects@^1.14.9:
resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.2.tgz#b460864144ba63f2681096f274c4e57026da2c13"
integrity sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==
+form-data-encoder@1.7.2:
+ version "1.7.2"
+ resolved "https://registry.yarnpkg.com/form-data-encoder/-/form-data-encoder-1.7.2.tgz#1f1ae3dccf58ed4690b86d87e4f57c654fbab040"
+ integrity sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==
+
form-data@^3.0.0:
version "3.0.1"
resolved "https://registry.yarnpkg.com/form-data/-/form-data-3.0.1.tgz#ebd53791b78356a99af9a300d4282c4d5eb9755f"
@@ -1068,6 +1145,14 @@ form-data@^4.0.0:
combined-stream "^1.0.8"
mime-types "^2.1.12"
+formdata-node@^4.3.2:
+ version "4.4.1"
+ resolved "https://registry.yarnpkg.com/formdata-node/-/formdata-node-4.4.1.tgz#23f6a5cb9cb55315912cbec4ff7b0f59bbd191e2"
+ integrity sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==
+ dependencies:
+ node-domexception "1.0.0"
+ web-streams-polyfill "4.0.0-beta.3"
+
forwarded@0.2.0:
version "0.2.0"
resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.2.0.tgz#2269936428aad4c15c7ebe9779a84bf0b2a81811"
@@ -1416,6 +1501,11 @@ is-binary-path@~2.1.0:
dependencies:
binary-extensions "^2.0.0"
+is-buffer@~1.1.6:
+ version "1.1.6"
+ resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be"
+ integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==
+
is-extglob@^2.1.1:
version "2.1.1"
resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2"
@@ -1675,6 +1765,15 @@ make-fetch-happen@^9.1.0:
socks-proxy-agent "^6.0.0"
ssri "^8.0.0"
+md5@^2.3.0:
+ version "2.3.0"
+ resolved "https://registry.yarnpkg.com/md5/-/md5-2.3.0.tgz#c3da9a6aae3a30b46b7b0c349b87b110dc3bda4f"
+ integrity sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==
+ dependencies:
+ charenc "0.0.2"
+ crypt "0.0.2"
+ is-buffer "~1.1.6"
+
media-typer@0.3.0:
version "0.3.0"
resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748"
@@ -1886,6 +1985,11 @@ node-addon-api@^5.0.0:
resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-5.1.0.tgz#49da1ca055e109a23d537e9de43c09cca21eb762"
integrity sha512-eh0GgfEkpnoWDq+VY8OyvYhFEzBk6jIYbRKdIlyTiAXIVJ8PyBaKb0rp7oDtoddbdoHWhq8wwr+XZ81F1rpNdA==
+node-domexception@1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/node-domexception/-/node-domexception-1.0.0.tgz#6888db46a1f71c0b76b3f7555016b63fe64766e5"
+ integrity sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==
+
node-fetch@^2.6.1, node-fetch@^2.6.12, node-fetch@^2.6.7, node-fetch@^2.6.9:
version "2.6.12"
resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.12.tgz#02eb8e22074018e3d5a83016649d04df0e348fba"
@@ -2585,6 +2689,11 @@ undefsafe@^2.0.5:
resolved "https://registry.yarnpkg.com/undefsafe/-/undefsafe-2.0.5.tgz#38733b9327bdcd226db889fb723a6efd162e6e2c"
integrity sha512-WxONCrssBM8TSPRqN5EmsjVrsv4A8X12J4ArBiiayv3DyyG3ZlIg6yysuuSYdZsVz3TKcTg2fd//Ujd4CHV1iA==
+undici-types@~5.26.4:
+ version "5.26.5"
+ resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
+ integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==
+
undici@^5.22.1:
version "5.23.0"
resolved "https://registry.yarnpkg.com/undici/-/undici-5.23.0.tgz#e7bdb0ed42cebe7b7aca87ced53e6eaafb8f8ca0"
@@ -2668,6 +2777,16 @@ weaviate-ts-client@^1.4.0:
isomorphic-fetch "^3.0.0"
uuid "^9.0.0"
+web-streams-polyfill@4.0.0-beta.3:
+ version "4.0.0-beta.3"
+ resolved "https://registry.yarnpkg.com/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz#2898486b74f5156095e473efe989dcf185047a38"
+ integrity sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==
+
+web-streams-polyfill@^3.2.1:
+ version "3.2.1"
+ resolved "https://registry.yarnpkg.com/web-streams-polyfill/-/web-streams-polyfill-3.2.1.tgz#71c2718c52b45fd49dbeee88634b3a60ceab42a6"
+ integrity sha512-e0MO3wdXWKrLbL0DgGnUV7WHVuw9OUvL4hjgnPkIeEvESk74gAITi5G606JtZPp39cd8HA9VQzCIvA49LpPN5Q==
+
webidl-conversions@^3.0.0:
version "3.0.1"
resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871"