From 5d56ab623ba27cfb2875b4039bd75bd465107bbd Mon Sep 17 00:00:00 2001
From: Timothy Carambat
Date: Mon, 30 Oct 2023 15:44:03 -0700
Subject: [PATCH] Anthropic claude 2 support (#305)

* WIP Anthropic support for chat, chat and query w/context

* Add onboarding support for Anthropic

* cleanup

* fix Anthropic answer parsing
move embedding selector to general util

---
 docker/.env.example | 10 +
 frontend/src/App.jsx | 7 +
 .../Modals/LegacySettings/ApiKey/index.jsx | 198 ----------
 .../LegacySettings/Appearance/index.jsx | 210 -----------
 .../LegacySettings/ExportImport/index.jsx | 216 -----------
 .../LegacySettings/LLMSelection/index.jsx | 270 -------------
 .../LegacySettings/MultiUserMode/index.jsx | 158 --------
 .../PasswordProtection/index.jsx | 117 ------
 .../Modals/LegacySettings/VectorDbs/index.jsx | 357 ------------------
 .../Modals/LegacySettings/index.jsx | 85 -----
 .../src/components/PrivateRoute/index.jsx | 2 +
 .../src/components/SettingsSidebar/index.jsx | 11 +
 .../EmbeddingPreference/index.jsx | 227 +++++++++++
 .../GeneralSettings/LLMPreference/index.jsx | 79 +++-
 .../GeneralSettings/VectorDatabase/index.jsx | 2 +-
 .../Steps/EmbeddingSelection/index.jsx | 179 +++++++++
 .../Steps/LLMSelection/index.jsx | 70 +++-
 .../OnboardingFlow/OnboardingModal/index.jsx | 7 +
 frontend/src/utils/paths.js | 3 +
 server/.env.example | 15 +-
 server/models/systemSettings.js | 14 +
 server/package.json | 3 +-
 server/utils/AiProviders/anthropic/index.js | 144 +++++++
 server/utils/AiProviders/azureOpenAi/index.js | 100 ++---
 server/utils/AiProviders/openAi/index.js | 92 ++---
 .../EmbeddingEngines/azureOpenAi/index.js | 87 +++++
 server/utils/EmbeddingEngines/openAi/index.js | 78 ++++
 server/utils/chats/index.js | 6 +-
 server/utils/helpers/index.js | 29 +-
 server/utils/helpers/updateENV.js | 37 +-
 .../utils/vectorDbProviders/chroma/index.js | 33 +-
 server/utils/vectorDbProviders/lance/index.js | 33 +-
 .../utils/vectorDbProviders/pinecone/index.js | 35 +-
 .../utils/vectorDbProviders/qdrant/index.js | 33 +-
 .../utils/vectorDbProviders/weaviate/index.js | 34 +-
 server/yarn.lock | 119 ++++++
 36 files changed, 1200 insertions(+), 1900 deletions(-)
 delete mode 100644 frontend/src/components/Modals/LegacySettings/ApiKey/index.jsx
 delete mode 100644 frontend/src/components/Modals/LegacySettings/Appearance/index.jsx
 delete mode 100644 frontend/src/components/Modals/LegacySettings/ExportImport/index.jsx
 delete mode 100644 frontend/src/components/Modals/LegacySettings/LLMSelection/index.jsx
 delete mode 100644 frontend/src/components/Modals/LegacySettings/MultiUserMode/index.jsx
 delete mode 100644 frontend/src/components/Modals/LegacySettings/PasswordProtection/index.jsx
 delete mode 100644 frontend/src/components/Modals/LegacySettings/VectorDbs/index.jsx
 delete mode 100644 frontend/src/components/Modals/LegacySettings/index.jsx
 create mode 100644 frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
 create mode 100644 frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/EmbeddingSelection/index.jsx
 create mode 100644 server/utils/AiProviders/anthropic/index.js
 create mode 100644 server/utils/EmbeddingEngines/azureOpenAi/index.js
 create mode 100644 server/utils/EmbeddingEngines/openAi/index.js

diff --git a/docker/.env.example b/docker/.env.example
index b0182cc8..cef05311 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -15,6 +15,16 @@ OPEN_MODEL_PREF='gpt-3.5-turbo'
 # OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model.
 # EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
 
+# LLM_PROVIDER='anthropic'
+# ANTHROPIC_API_KEY=sk-ant-xxxx
+# ANTHROPIC_MODEL_PREF='claude-2'
+
+###########################################
+######## Embedding API SELECTION ##########
+###########################################
+# Only used if you are using an LLM that does not natively support embedding (OpenAI and Azure do)
+# EMBEDDING_ENGINE='openai'
+# OPEN_AI_KEY=sk-xxxx
 
 ###########################################
 ######## Vector Database Selection ########
diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx
index 2b48a363..98ffaf03 100644
--- a/frontend/src/App.jsx
+++ b/frontend/src/App.jsx
@@ -22,6 +22,9 @@ const GeneralApiKeys = lazy(() => import("./pages/GeneralSettings/ApiKeys"));
 const GeneralLLMPreference = lazy(() =>
   import("./pages/GeneralSettings/LLMPreference")
 );
+const GeneralEmbeddingPreference = lazy(() =>
+  import("./pages/GeneralSettings/EmbeddingPreference")
+);
 const GeneralVectorDatabase = lazy(() =>
   import("./pages/GeneralSettings/VectorDatabase")
 );
@@ -50,6 +53,10 @@ export default function App() {
             path="/general/llm-preference"
             element={}
           />
+          }
+          />
           }
diff --git a/frontend/src/components/Modals/LegacySettings/ApiKey/index.jsx b/frontend/src/components/Modals/LegacySettings/ApiKey/index.jsx
deleted file mode 100644
index 7f023180..00000000
--- a/frontend/src/components/Modals/LegacySettings/ApiKey/index.jsx
+++ /dev/null
@@ -1,198 +0,0 @@
-import { useEffect, useState } from "react";
-import System from "../../../../models/system";
-import PreLoader from "../../../Preloader";
-import paths from "../../../../utils/paths";
-import showToast from "../../../../utils/toast";
-import { CheckCircle, Copy, RefreshCcw, Trash } from "react-feather";
-
-export default function ApiKey() {
-  const [loading, setLoading] = useState(true);
-  const [generating, setGenerating] = useState(false);
-  const [copied, setCopied] = useState(false);
-  const [deleting, setDeleting] = useState(false);
-  const [apiKey, setApiKey] = useState(null);
-
-  useEffect(() => {
-    async function fetchExistingApiKey() {
-      const { apiKey: _apiKey } = await System.getApiKey();
-      setApiKey(_apiKey);
-      setLoading(false);
-    }
-    fetchExistingApiKey();
-  }, []);
-
-  const generateApiKey = async () => {
-    setGenerating(true);
-    const isRefresh = !!apiKey;
-    const { apiKey: newApiKey, error } = await System.generateApiKey();
-    if (!!error) {
-      showToast(error, "error");
-    } else {
-      showToast(
-        isRefresh ? "API key regenerated!" : "API key generated!",
-        "info"
-      );
-      setApiKey(newApiKey);
-    }
-    setGenerating(false);
-  };
-
-  const removeApiKey = async () => {
-    setDeleting(true);
-    const ok = await System.deleteApiKey();
-    if (ok) {
-      showToast("API key deleted from instance.", "info");
-      setApiKey(null);
-    } else {
-      showToast("API key could not be deleted.", "error");
-    }
-    setDeleting(false);
-  };
-
-  const copyToClipboard = async () => {
-    window.navigator.clipboard.writeText(apiKey.secret);
-    showToast("API key copied to clipboard!", "info");
-    setCopied(true);
-    setTimeout(() => {
-      setCopied(false);
-    }, 1200);
-  };
-
-  if (loading) {
-    return (
-    
-
-
-

- Generate an API Key for your AnythingLLM instance. -

-
-
- -
-
-
- ); - } - - if (!apiKey) { - return ( -
-
-
-

- Generate an API Key for your AnythingLLM instance. -

-
-
-
-

- No api key for this instance exists. Create one by clicking the - button below. -

- - View endpoint documentation → - -
- -
-
-
- ); - } - - return ( -
-
-
-

- Use this API key for interacting with your AnythingLLM instance - programmatically. -

- - View endpoint documentation → - -
- -
-
-
-
- - - - -
-
-
-
-
-
- ); -} diff --git a/frontend/src/components/Modals/LegacySettings/Appearance/index.jsx b/frontend/src/components/Modals/LegacySettings/Appearance/index.jsx deleted file mode 100644 index 4ce1078b..00000000 --- a/frontend/src/components/Modals/LegacySettings/Appearance/index.jsx +++ /dev/null @@ -1,210 +0,0 @@ -import { useEffect, useState } from "react"; -import useLogo from "../../../../hooks/useLogo"; -import usePrefersDarkMode from "../../../../hooks/usePrefersDarkMode"; -import System from "../../../../models/system"; -import EditingChatBubble from "../../../EditingChatBubble"; -import AnythingLLM from "../../../../media/logo/anything-llm.png"; -import showToast from "../../../../utils/toast"; - -export default function Appearance() { - const { logo: _initLogo } = useLogo(); - const prefersDarkMode = usePrefersDarkMode(); - const [logo, setLogo] = useState(""); - const [hasChanges, setHasChanges] = useState(false); - const [messages, setMessages] = useState([]); - - useEffect(() => { - async function fetchMessages() { - const messages = await System.getWelcomeMessages(); - setMessages(messages); - } - fetchMessages(); - }, []); - - useEffect(() => { - async function setInitLogo() { - setLogo(_initLogo || ""); - } - setInitLogo(); - }, [_initLogo]); - - const handleFileUpload = async (event) => { - const file = event.target.files[0]; - if (!file) return false; - - const formData = new FormData(); - formData.append("logo", file); - const { success, error } = await System.uploadLogo(formData); - if (!success) { - console.error("Failed to upload logo:", error); - showToast(`Failed to upload logo: ${error}`, "error"); - return; - } - - const logoURL = await System.fetchLogo(); - setLogo(logoURL); - showToast("Image uploaded successfully.", "success"); - }; - - const handleRemoveLogo = async () => { - const { success, error } = await System.removeCustomLogo(); - if (!success) { - console.error("Failed to remove logo:", error); - showToast(`Failed to remove logo: ${error}`, "error"); - return; - } - - const logoURL = await System.fetchLogo(); - setLogo(logoURL); - showToast("Image successfully removed.", "success"); - }; - - const addMessage = (type) => { - if (type === "user") { - setMessages([ - ...messages, - { user: "Double click to edit...", response: "" }, - ]); - } else { - setMessages([ - ...messages, - { user: "", response: "Double click to edit..." }, - ]); - } - }; - - const removeMessage = (index) => { - setHasChanges(true); - setMessages(messages.filter((_, i) => i !== index)); - }; - - const handleMessageChange = (index, type, value) => { - setHasChanges(true); - const newMessages = [...messages]; - newMessages[index][type] = value; - setMessages(newMessages); - }; - - const handleMessageSave = async () => { - const { success, error } = await System.setWelcomeMessages(messages); - if (!success) { - showToast(`Failed to update welcome messages: ${error}`, "error"); - return; - } - showToast("Successfully updated welcome messages.", "success"); - setHasChanges(false); - }; - - return ( -
-
-
-

- Customize the appearance settings of AnythingLLM instance. -

-
- -
-
-
-

- Custom Logo -

-

- Change the logo that appears in the sidebar. -

-
-
- Uploaded Logo (e.target.src = AnythingLLM)} - /> -
-
- - -
-
- Upload your logo. Recommended size: 800x200. -
-
-
-
-
-
-

- Custom Messages -

-

- Change the default messages that are displayed to the users. -

-
-
- {messages.map((message, index) => ( -
- {message.user && ( - - )} - {message.response && ( - - )} -
- ))} -
- - -
-
- {hasChanges && ( -
- -
- )} -
-
-
-
- ); -} diff --git a/frontend/src/components/Modals/LegacySettings/ExportImport/index.jsx b/frontend/src/components/Modals/LegacySettings/ExportImport/index.jsx deleted file mode 100644 index e2245d53..00000000 --- a/frontend/src/components/Modals/LegacySettings/ExportImport/index.jsx +++ /dev/null @@ -1,216 +0,0 @@ -import React, { useState, useEffect, useRef } from "react"; -import { AlertCircle, CheckCircle, Download, Loader, X } from "react-feather"; -import System from "../../../../models/system"; -import { API_BASE } from "../../../../utils/constants"; -import paths from "../../../../utils/paths"; - -const noop = () => false; -export default function ExportOrImportData({ hideModal = noop }) { - return ( -
-
-
-

- Have multiple AnythingLLM instances or simply want to backup or - re-import data from another instance? You can do so here. -
- - This will not automatically sync your vector database embeddings! - -

- - View previous exports → - -
-
-
- -
- -
-
-
- -
-
-
- ); -} - -function ExportData() { - const [loading, setLoading] = useState(false); - const [result, setResult] = useState(null); - const [error, setError] = useState(null); - const exportData = async function () { - setLoading(true); - const { filename, error } = await System.dataExport(); - setLoading(false); - - if (!filename) { - setError(error); - } else { - setResult(filename); - const link = document.createElement("a"); - link.href = `${API_BASE}/system/data-exports/${filename}`; - link.target = "_blank"; - document.body.appendChild(link); - link.click(); - } - }; - - if (loading) { - return ( -
-

Exporting....

-

- A download will start automatically. -

-
- ); - } - - if (error) { - return ( - - ); - } - - if (!!result) { - return ( - - -

Download Data Export

-
- ); - } - - return ( - - ); -} - -function ImportData() { - const inputRef = useRef(null); - const [loading, setLoading] = useState(false); - const [file, setFile] = useState(null); - const [result, setResult] = useState(null); - const [error, setError] = useState(null); - const startInput = () => inputRef?.current?.click(); - const handleUpload = async (e) => { - e.preventDefault(); - setError(null); - setFile(null); - setResult(null); - - const file = e.target.files?.[0]; - if (!file) { - setError("Invalid file upload"); - return false; - } - - setFile(file); - setLoading(true); - const formData = new FormData(); - formData.append("file", file, file.name); - const { success, error } = await System.importData(formData); - if (!success) { - setError(error); - } else { - setResult(true); - } - - setLoading(false); - setFile(null); - }; - - if (loading) { - return ( -
-

Importing....

-

{file.name}

-
- ); - } - - if (error) { - return ( - - ); - } - - if (!!result) { - return ( -
-
- -

- Import was completed successfully -

-
-

- please reload the page to see the results of the import. -

-
- ); - } - - return ( - <> - - - - ); -} diff --git a/frontend/src/components/Modals/LegacySettings/LLMSelection/index.jsx b/frontend/src/components/Modals/LegacySettings/LLMSelection/index.jsx deleted file mode 100644 index 7fd07516..00000000 --- a/frontend/src/components/Modals/LegacySettings/LLMSelection/index.jsx +++ /dev/null @@ -1,270 +0,0 @@ -import React, { useState } from "react"; -import System from "../../../../models/system"; -import OpenAiLogo from "../../../../media/llmprovider/openai.png"; -import AzureOpenAiLogo from "../../../../media/llmprovider/azure.png"; -import AnthropicLogo from "../../../../media/llmprovider/anthropic.png"; -import showToast from "../../../../utils/toast"; - -const noop = () => false; -export default function LLMSelection({ - hideModal = noop, - user, - settings = {}, -}) { - const [hasChanges, setHasChanges] = useState(false); - const [llmChoice, setLLMChoice] = useState(settings?.LLMProvider || "openai"); - const [saving, setSaving] = useState(false); - const canDebug = settings.MultiUserMode - ? settings?.CanDebug && user?.role === "admin" - : settings?.CanDebug; - - function updateLLMChoice(selection) { - if (!canDebug || selection === llmChoice) return false; - setHasChanges(true); - setLLMChoice(selection); - } - - const handleSubmit = async (e) => { - e.preventDefault(); - setSaving(true); - const data = {}; - const form = new FormData(e.target); - for (var [key, value] of form.entries()) data[key] = value; - const { error } = await System.updateSystem(data); - if (error) { - showToast(`Failed to save LLM settings: ${error}`, "error"); - } else { - showToast("LLM settings saved successfully.", "success"); - } - setSaving(false); - setHasChanges(!!error ? true : false); - }; - return ( -
-
-
-

- These are the credentials and settings for your preferred LLM chat & - embedding provider. Its important these keys are current and correct - or else AnythingLLM will not function properly. -

-
- -
setHasChanges(true)}> -
-
-

- LLM providers -

-
- - - - -
- {llmChoice === "openai" && ( - <> -
- - -
- -
- - -
- - )} - - {llmChoice === "azure" && ( - <> -
- - -
- -
- - -
- -
- - -
- -
- - -
- - )} - - {llmChoice === "anthropic-claude-2" && ( -
-

- This provider is unavailable and cannot be used in - AnythingLLM currently. -

-
- )} -
-
-
- -
-
-
- -
-
-
- ); -} - -const LLMProviderOption = ({ - name, - link, - description, - value, - image, - checked = false, - onClick, -}) => { - return ( -
onClick(value)}> - - -
- ); -}; diff --git a/frontend/src/components/Modals/LegacySettings/MultiUserMode/index.jsx b/frontend/src/components/Modals/LegacySettings/MultiUserMode/index.jsx deleted file mode 100644 index c54879fe..00000000 --- a/frontend/src/components/Modals/LegacySettings/MultiUserMode/index.jsx +++ /dev/null @@ -1,158 +0,0 @@ -import React, { useState } from "react"; -import System from "../../../../models/system"; -import { - AUTH_TIMESTAMP, - AUTH_TOKEN, - AUTH_USER, -} from "../../../../utils/constants"; -import paths from "../../../../utils/paths"; - -const noop = () => false; -export default function MultiUserMode({ hideModal = noop }) { - const [saving, setSaving] = useState(false); - const [success, setSuccess] = useState(false); - const [error, setError] = useState(null); - const [useMultiUserMode, setUseMultiUserMode] = useState(false); - - const handleSubmit = async (e) => { - e.preventDefault(); - setSaving(true); - setSuccess(false); - setError(null); - - const form = new FormData(e.target); - const data = { - username: form.get("username"), - password: form.get("password"), - }; - - const { success, error } = await System.setupMultiUser(data); - if (success) { - setSuccess(true); - setSaving(false); - setTimeout(() => { - window.localStorage.removeItem(AUTH_USER); - window.localStorage.removeItem(AUTH_TOKEN); - window.localStorage.removeItem(AUTH_TIMESTAMP); - window.location = paths.admin.users(); - }, 2_000); - return; - } - - setError(error); - setSaving(false); - }; - - return ( -
-
-
-

- Update your AnythingLLM instance to support multiple concurrent - users with their own workspaces. As the admin you can view all - workspaces and add people into workspaces as well. This change is - not reversible and will permanently alter your AnythingLLM - installation. -

-
- {(error || success) && ( -
- {error && ( -
- {error} -
- )} - {success && ( -
- Your page will refresh in a few seconds. -
- )} -
- )} -
-
-
-
- - - -
-
- {useMultiUserMode && ( - <> -

- By default, you will be the only admin. As an admin you - will need to create accounts for all new users or admins. - Do not lose your password as only an Admin user can reset - passwords. -

-
- - -
-
- - -
- - - )} -
-
-
-
-
- -
-
-
- ); -} diff --git a/frontend/src/components/Modals/LegacySettings/PasswordProtection/index.jsx b/frontend/src/components/Modals/LegacySettings/PasswordProtection/index.jsx deleted file mode 100644 index a9de2cd6..00000000 --- a/frontend/src/components/Modals/LegacySettings/PasswordProtection/index.jsx +++ /dev/null @@ -1,117 +0,0 @@ -import React, { useState } from "react"; -import System from "../../../../models/system"; -import { - AUTH_TIMESTAMP, - AUTH_TOKEN, - AUTH_USER, -} from "../../../../utils/constants"; -import showToast from "../../../../utils/toast"; - -const noop = () => false; -export default function PasswordProtection({ - hideModal = noop, - settings = {}, -}) { - const [saving, setSaving] = useState(false); - const [usePassword, setUsePassword] = useState(settings?.RequiresAuth); - - const handleSubmit = async (e) => { - e.preventDefault(); - setSaving(true); - - const form = new FormData(e.target); - const data = { - usePassword, - newPassword: form.get("password"), - }; - - const { success, error } = await System.updateSystemPassword(data); - if (success) { - showToast("Your page will refresh in a few seconds.", "success"); - setSaving(false); - setTimeout(() => { - window.localStorage.removeItem(AUTH_USER); - window.localStorage.removeItem(AUTH_TOKEN); - window.localStorage.removeItem(AUTH_TIMESTAMP); - window.location.reload(); - }, 3_000); - return; - } else { - showToast(`Failed to update password: ${error}`, "error"); - } - - setSaving(false); - }; - - return ( -
-
-
-

- Protect your AnythingLLM instance with a password. If you forget - this there is no recovery method so ensure you save this password. -

-
-
-
-
-
- - - -
-
- {usePassword && ( -
- - -
- )} - -
-
-
-
-
- -
-
-
- ); -} diff --git a/frontend/src/components/Modals/LegacySettings/VectorDbs/index.jsx b/frontend/src/components/Modals/LegacySettings/VectorDbs/index.jsx deleted file mode 100644 index 09c60b3a..00000000 --- a/frontend/src/components/Modals/LegacySettings/VectorDbs/index.jsx +++ /dev/null @@ -1,357 +0,0 @@ -import React, { useState } from "react"; -import System from "../../../../models/system"; -import ChromaLogo from "../../../../media/vectordbs/chroma.png"; -import PineconeLogo from "../../../../media/vectordbs/pinecone.png"; -import LanceDbLogo from "../../../../media/vectordbs/lancedb.png"; -import WeaviateLogo from "../../../../media/vectordbs/weaviate.png"; -import QDrantLogo from "../../../../media/vectordbs/qdrant.png"; - -const noop = () => false; -export default function VectorDBSelection({ - hideModal = noop, - user, - settings = {}, -}) { - const [hasChanges, setHasChanges] = useState(false); - const [vectorDB, setVectorDB] = useState(settings?.VectorDB || "lancedb"); - const [saving, setSaving] = useState(false); - const [error, setError] = useState(null); - const canDebug = settings.MultiUserMode - ? settings?.CanDebug && user?.role === "admin" - : settings?.CanDebug; - - function updateVectorChoice(selection) { - if (!canDebug || selection === vectorDB) return false; - setHasChanges(true); - setVectorDB(selection); - } - - const handleSubmit = async (e) => { - e.preventDefault(); - setSaving(true); - setError(null); - const data = {}; - const form = new FormData(e.target); - for (var [key, value] of form.entries()) data[key] = value; - const { error } = await System.updateSystem(data); - setError(error); - setSaving(false); - setHasChanges(!!error ? true : false); - }; - return ( -
-
-
-

- These are the credentials and settings for how your AnythingLLM - instance will function. Its important these keys are current and - correct. -

-
- - {!!error && ( -
-

{error}

-
- )} - -
setHasChanges(true)}> -
-
-

- Vector database providers -

-
- - - - - - -
- {vectorDB === "pinecone" && ( - <> -
- - -
- -
- - -
- -
- - -
- - )} - - {vectorDB === "chroma" && ( - <> -
- - -
- -
-
- -

- If your hosted Chroma instance is protected by an API - key - enter the header and api key here. -

-
-
- - -
-
- - )} - {vectorDB === "lancedb" && ( -
-

- There is no configuration needed for LanceDB. -

-
- )} - {vectorDB === "qdrant" && ( - <> -
- - -
-
- - -
- - )} - {vectorDB === "weaviate" && ( - <> -
- - -
-
- - -
- - )} -
-
-
- -
-
-
- -
-
-
- ); -} - -const VectorDBOption = ({ - name, - link, - description, - value, - image, - checked = false, - onClick, -}) => { - return ( -
onClick(value)}> - - -
- ); -}; diff --git a/frontend/src/components/Modals/LegacySettings/index.jsx b/frontend/src/components/Modals/LegacySettings/index.jsx deleted file mode 100644 index a531c507..00000000 --- a/frontend/src/components/Modals/LegacySettings/index.jsx +++ /dev/null @@ -1,85 +0,0 @@ -import React, { useEffect, useState } from "react"; -import { X } from "react-feather"; -import ExportOrImportData from "./ExportImport"; -import PasswordProtection from "./PasswordProtection"; -import System from "../../../models/system"; -import MultiUserMode from "./MultiUserMode"; -import useUser from "../../../hooks/useUser"; -import VectorDBSelection from "./VectorDbs"; -import LLMSelection from "./LLMSelection"; -import Appearance from "./Appearance"; -import ApiKey from "./ApiKey"; - -export const TABS = { - llm: LLMSelection, - exportimport: ExportOrImportData, - password: PasswordProtection, - multiuser: MultiUserMode, - vectordb: VectorDBSelection, - appearance: Appearance, - apikey: ApiKey, -}; - -const noop = () => false; -export default function SystemSettingsModal({ tab = null, hideModal = noop }) { - const { user } = useUser(); - const [loading, setLoading] = useState(true); - const [settings, setSettings] = useState(null); - const Component = TABS[tab || "llm"]; - - useEffect(() => { - async function fetchKeys() { - const _settings = await System.keys(); - setSettings(_settings); - setLoading(false); - } - fetchKeys(); - }, []); - - return ( -
-
-
-
-
-
-

- System Settings -

- -
-
- {loading ? ( -
-
-
- ) : ( - - )} -
-
-
- ); -} - -export function useSystemSettingsModal() { - const [showing, setShowing] = useState(false); - const showModal = () => { - setShowing(true); - }; - const hideModal = () => { - setShowing(false); - }; - - return { showing, showModal, hideModal }; -} diff --git a/frontend/src/components/PrivateRoute/index.jsx b/frontend/src/components/PrivateRoute/index.jsx index 1c403f81..e8db5400 100644 --- a/frontend/src/components/PrivateRoute/index.jsx +++ b/frontend/src/components/PrivateRoute/index.jsx @@ -21,6 +21,7 @@ function useIsAuthenticated() { MultiUserMode, RequiresAuth, OpenAiKey = false, + AnthropicApiKey = false, AzureOpenAiKey = false, } = await System.keys(); @@ -29,6 +30,7 @@ function useIsAuthenticated() { !MultiUserMode && !RequiresAuth && // Not in Multi-user AND no password set. !OpenAiKey && + !AnthropicApiKey && !AzureOpenAiKey // AND no LLM API Key set at all. ) { setShouldRedirectToOnboarding(true); diff --git a/frontend/src/components/SettingsSidebar/index.jsx b/frontend/src/components/SettingsSidebar/index.jsx index 08bb8461..c50ae878 100644 --- a/frontend/src/components/SettingsSidebar/index.jsx +++ b/frontend/src/components/SettingsSidebar/index.jsx @@ -21,6 +21,7 @@ import { House, X, List, + FileCode, } from "@phosphor-icons/react"; import useUser from "../../hooks/useUser"; import { USER_BACKGROUND_COLOR } from "../../utils/constants"; @@ -115,6 +116,11 @@ export default function SettingsSidebar() { btnText="LLM Preference" icon={} /> +
@@ -206,7 +209,7 @@ export default function GeneralLLMPreference() {
)} - {llmChoice === "anthropic-claude-2" && ( -
-

- This provider is unavailable and cannot be used in - AnythingLLM currently. -

+ {llmChoice === "anthropic" && ( +
+
+
+ +

Anthropic as your LLM requires you to set up an embedding
                  service to use.
                

+
+ + Manage embedding → + +
+
+
+ + +
+ +
+ + +
+
)}
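
For reference, this panel persists its fields the same way the other settings pages in this patch do: named inputs are flattened into a key/value object and posted to the server, which maps each key onto an env var (see updateENV.js further down). A minimal sketch of that flow, assuming the Anthropic field names shown above; `updateSystem` stands in for the app's existing `System.updateSystem` frontend model method:

```js
// Minimal sketch, assuming the field names used by the Anthropic panel.
// `updateSystem` is a stand-in for the app's System.updateSystem helper.
async function saveAnthropicSettings(formEl, updateSystem) {
  const data = {};
  for (const [key, value] of new FormData(formEl).entries()) data[key] = value;
  // data ends up like:
  // { LLMProvider: "anthropic", AnthropicApiKey: "sk-ant-...", AnthropicModelPref: "claude-2" }
  const { error } = await updateSystem(data);
  if (error) throw new Error(`Failed to save LLM settings: ${error}`);
}
```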
diff --git a/frontend/src/pages/GeneralSettings/VectorDatabase/index.jsx b/frontend/src/pages/GeneralSettings/VectorDatabase/index.jsx index c7620a5e..94d6d7b5 100644 --- a/frontend/src/pages/GeneralSettings/VectorDatabase/index.jsx +++ b/frontend/src/pages/GeneralSettings/VectorDatabase/index.jsx @@ -48,7 +48,7 @@ export default function GeneralVectorDatabase() { showToast("Settings saved successfully.", "success"); } setSaving(false); - setHasChanges(!!error ? true : false); + setHasChanges(!!error); }; return ( diff --git a/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/EmbeddingSelection/index.jsx b/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/EmbeddingSelection/index.jsx new file mode 100644 index 00000000..aadf3798 --- /dev/null +++ b/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/EmbeddingSelection/index.jsx @@ -0,0 +1,179 @@ +import React, { memo, useEffect, useState } from "react"; +import OpenAiLogo from "../../../../../media/llmprovider/openai.png"; +import AzureOpenAiLogo from "../../../../../media/llmprovider/azure.png"; +import System from "../../../../../models/system"; +import PreLoader from "../../../../../components/Preloader"; +import LLMProviderOption from "../../../../../components/LLMProviderOption"; + +function EmbeddingSelection({ nextStep, prevStep, currentStep, goToStep }) { + const [embeddingChoice, setEmbeddingChoice] = useState("openai"); + const [llmChoice, setLLMChoice] = useState("openai"); + + const [settings, setSettings] = useState(null); + const [loading, setLoading] = useState(true); + + const updateChoice = (selection) => { + setEmbeddingChoice(selection); + }; + + useEffect(() => { + async function fetchKeys() { + const _settings = await System.keys(); + setSettings(_settings); + setEmbeddingChoice(_settings?.EmbeddingEngine || "openai"); + setLoading(false); + } + fetchKeys(); + }, [currentStep]); + + const handleSubmit = async (e) => { + e.preventDefault(); + const form = e.target; + const data = {}; + const formData = new FormData(form); + for (var [key, value] of formData.entries()) data[key] = value; + const { error } = await System.updateSystem(data); + if (error) { + alert(`Failed to save LLM settings: ${error}`, "error"); + return; + } + goToStep(2); + return; + }; + + if (loading) + return ( +
+ +
+ ); + + return ( +
+
+
+
+ Embedding Provider +
+
+ + + +
+
+ {embeddingChoice === "openai" && ( + <> +
+ + +
+ + )} + + {embeddingChoice === "azure" && ( + <> +
+ + +
+ +
+ + +
+ +
+ + +
+ + )} +
+
+
+ + +
+
+
+ ); +} + +export default memo(EmbeddingSelection); diff --git a/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/LLMSelection/index.jsx b/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/LLMSelection/index.jsx index 70d95785..b3d141f6 100644 --- a/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/LLMSelection/index.jsx +++ b/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/LLMSelection/index.jsx @@ -7,7 +7,7 @@ import System from "../../../../../models/system"; import PreLoader from "../../../../../components/Preloader"; import LLMProviderOption from "../../../../../components/LLMProviderOption"; -function LLMSelection({ nextStep, prevStep, currentStep }) { +function LLMSelection({ nextStep, prevStep, currentStep, goToStep }) { const [llmChoice, setLLMChoice] = useState("openai"); const [settings, setSettings] = useState(null); const [loading, setLoading] = useState(true); @@ -40,7 +40,13 @@ function LLMSelection({ nextStep, prevStep, currentStep }) { alert(`Failed to save LLM settings: ${error}`, "error"); return; } - nextStep(); + + switch (data.LLMProvider) { + case "anthropic": + goToStep(7); + default: + nextStep(); + } return; }; @@ -59,7 +65,7 @@ function LLMSelection({ nextStep, prevStep, currentStep }) { LLM Providers
- +
@@ -166,7 +173,7 @@ function LLMSelection({ nextStep, prevStep, currentStep }) {
)} - {llmChoice === "anthropic-claude-2" && ( -
-

- This provider is unavailable and cannot be used in AnythingLLM - currently. -

+ {llmChoice === "anthropic" && ( +
+
+
+ + +
+ +
+ + +
+
)}

 diff --git a/frontend/src/pages/OnboardingFlow/OnboardingModal/index.jsx b/frontend/src/pages/OnboardingFlow/OnboardingModal/index.jsx
index 0cdf05fa..a412ecc5 100644
--- a/frontend/src/pages/OnboardingFlow/OnboardingModal/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/OnboardingModal/index.jsx
@@ -7,6 +7,7 @@ import UserModeSelection from "./Steps/UserModeSelection";
 import PasswordProtection from "./Steps/PasswordProtection";
 import MultiUserSetup from "./Steps/MultiUserSetup";
 import CreateFirstWorkspace from "./Steps/CreateFirstWorkspace";
+import EmbeddingSelection from "./Steps/EmbeddingSelection";
 
 const DIALOG_ID = "onboarding-modal";
 
@@ -54,6 +55,12 @@ const STEPS = {
     description: "To get started, create a new workspace.",
     component: CreateFirstWorkspace,
   },
+  8: {
+    title: "Embedding Preference",
+    description:
+      "Due to your LLM selection, you need to set up a provider for embedding files and text.",
+    component: EmbeddingSelection,
+  },
 };
 
 export const OnboardingModalId = DIALOG_ID;
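
The new step 8 is reached out of order: the switch added to the LLM step (shown earlier) jumps Anthropic users to the embedding step instead of advancing linearly, and the EmbeddingSelection submit handler then returns to the normal flow. A condensed sketch of that routing, assuming goToStep(n) simply activates a STEPS entry and nextStep() advances by one:

```js
// Condensed sketch of the onboarding detour added in this patch.
// Assumes goToStep(n) activates a STEPS entry and nextStep() moves ahead one.
function routeAfterLLMSelection(provider, { nextStep, goToStep }) {
  if (provider === "anthropic") {
    // Anthropic cannot embed text itself, so collect an embedding
    // provider first via the new EmbeddingSelection step.
    goToStep(7); // the step index used by the diff above
    return;
  }
  nextStep();
}
```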
diff --git a/frontend/src/utils/paths.js b/frontend/src/utils/paths.js
index f914f63b..1d77abe5 100644
--- a/frontend/src/utils/paths.js
+++ b/frontend/src/utils/paths.js
@@ -43,6 +43,9 @@ export default {
   llmPreference: () => {
     return "/general/llm-preference";
   },
+  embeddingPreference: () => {
+    return "/general/embedding-preference";
+  },
   vectorDatabase: () => {
     return "/general/vector-database";
   },
diff --git a/server/.env.example b/server/.env.example
index 2f6ef041..d7a9cbe7 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -5,9 +5,9 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at least 12 chars long.
 ###########################################
 ######## LLM API SELECTION ################
 ###########################################
-LLM_PROVIDER='openai'
+# LLM_PROVIDER='openai'
 # OPEN_AI_KEY=
-OPEN_MODEL_PREF='gpt-3.5-turbo'
+# OPEN_MODEL_PREF='gpt-3.5-turbo'
 
 # LLM_PROVIDER='azure'
 # AZURE_OPENAI_ENDPOINT=
@@ -15,6 +15,17 @@
 # OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model.
 # EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
 
+# LLM_PROVIDER='anthropic'
+# ANTHROPIC_API_KEY=sk-ant-xxxx
+# ANTHROPIC_MODEL_PREF='claude-2'
+
+###########################################
+######## Embedding API SELECTION ##########
+###########################################
+# Only used if you are using an LLM that does not natively support embedding (OpenAI and Azure do)
+# EMBEDDING_ENGINE='openai'
+# OPEN_AI_KEY=sk-xxxx
+
 ###########################################
 ######## Vector Database Selection ########
 ###########################################
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index 836d5907..4d2f73b3 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -24,6 +24,7 @@ const SystemSettings = {
       StorageDir: process.env.STORAGE_DIR,
       MultiUserMode: await this.isMultiUserMode(),
       VectorDB: vectorDB,
+      EmbeddingEngine: process.env.EMBEDDING_ENGINE,
       ...(vectorDB === "pinecone"
         ? {
             PineConeEnvironment: process.env.PINECONE_ENVIRONMENT,
@@ -66,6 +67,19 @@ const SystemSettings = {
             AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
           }
         : {}),
+
+      ...(llmProvider === "anthropic"
+        ? {
+            AnthropicApiKey: !!process.env.ANTHROPIC_API_KEY,
+            AnthropicModelPref: process.env.ANTHROPIC_MODEL_PREF || "claude-2",
+
+            // For embedding credentials when Anthropic is selected.
+            OpenAiKey: !!process.env.OPEN_AI_KEY,
+            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
+            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
+            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
+          }
+        : {}),
     };
   },
 
diff --git a/server/package.json b/server/package.json
index eb2e8470..62879b83 100644
--- a/server/package.json
+++ b/server/package.json
@@ -20,6 +20,7 @@
     "seed": "node prisma/seed.js"
   },
   "dependencies": {
+    "@anthropic-ai/sdk": "^0.8.1",
     "@azure/openai": "^1.0.0-beta.3",
     "@googleapis/youtube": "^9.0.0",
     "@pinecone-database/pinecone": "^0.1.6",
@@ -59,4 +60,4 @@
     "nodemon": "^2.0.22",
     "prettier": "^2.4.1"
   }
-}
\ No newline at end of file
+}
diff --git a/server/utils/AiProviders/anthropic/index.js b/server/utils/AiProviders/anthropic/index.js
new file mode 100644
index 00000000..d3dd68f2
--- /dev/null
+++ b/server/utils/AiProviders/anthropic/index.js
@@ -0,0 +1,144 @@
+const { v4 } = require("uuid");
+const { chatPrompt } = require("../../chats");
+
+class AnthropicLLM {
+  constructor(embedder = null) {
+    if (!process.env.ANTHROPIC_API_KEY)
+      throw new Error("No Anthropic API key was set.");
+
+    // Docs: https://www.npmjs.com/package/@anthropic-ai/sdk
+    const AnthropicAI = require("@anthropic-ai/sdk");
+    const anthropic = new AnthropicAI({
+      apiKey: process.env.ANTHROPIC_API_KEY,
+    });
+    this.anthropic = anthropic;
+
+    if (!embedder)
+      throw new Error(
+        "INVALID ANTHROPIC SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Anthropic as your LLM."
+      );
+    this.embedder = embedder;
+    this.answerKey = v4().split("-")[0];
+  }
+
+  isValidChatModel(modelName = "") {
+    const validModels = ["claude-2"];
+    return validModels.includes(modelName);
+  }
+
+  // Moderation can be done with Anthropic, but it's not really "exact" so we skip it
+  // https://docs.anthropic.com/claude/docs/content-moderation
+  async isSafe(_input = "") {
+    // Not implemented so must be stubbed
+    return { safe: true, reasons: [] };
+  }
+
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+  }) {
+    return `\n\nHuman: Please read the question supplied within the <question> tags. Using all information, generate an answer to the question and output it within <${
+      this.answerKey
+    }> tags. Previous conversations can be used within the <history> tags and can be used to influence the output. Content between the <system> tag is additional information and instruction that will impact how answers are formatted or responded to. Additional contextual information retrieved to help answer the user's specific query is available to use for answering and can be found between <context> tags. When no <context> tags are present, use the knowledge available and in the conversation to answer. When one or more <context> tags are available you will use those to help answer the question or augment pre-existing knowledge. You should never say "Based on the provided context" or other phrasing that is not related to the user question.
+    <system>${systemPrompt}</system>
+    ${contextTexts
+      .map((text, i) => {
+        return `<context>${text}</context>\n`;
+      })
+      .join("")}
+    <history>${chatHistory.map((history) => {
+      switch (history.role) {
+        case "assistant":
+          return `\n\nAssistant: ${history.content}`;
+        case "user":
+          return `\n\nHuman: ${history.content}`;
+        default:
+          return "\n";
+      }
+    })}</history>
+    <question>${userPrompt}</question>
+    \n\nAssistant:`;
+  }
+
+  // This is the interface used when no embeddings are present in the workspace
+  // This is just having a conversation with the LLM as one would normally.
+  async sendChat(chatHistory = [], prompt, workspace = {}) {
+    const model = process.env.ANTHROPIC_MODEL_PREF || "claude-2";
+    if (!this.isValidChatModel(model))
+      throw new Error(
+        `Anthropic chat: ${model} is not valid for chat completion!`
+      );
+
+    const { content, error } = await this.anthropic.completions
+      .create({
+        model,
+        max_tokens_to_sample: 300,
+        prompt: this.constructPrompt({
+          systemPrompt: chatPrompt(workspace),
+          userPrompt: prompt,
+          chatHistory,
+        }),
+      })
+      .then((res) => {
+        const { completion } = res;
+        const re = new RegExp(
+          "(?:<" + this.answerKey + ">)([\\s\\S]*)(?:</" + this.answerKey + ">)"
+        );
+        const response = completion.match(re)?.[1]?.trim();
+        if (!response)
+          throw new Error("Anthropic: No response could be parsed.");
+        return { content: response, error: null };
+      })
+      .catch((e) => {
+        return { content: null, error: e.message };
+      });
+
+    if (error) throw new Error(error);
+    return content;
+  }
+
+  async getChatCompletion(prompt = "", _opts = {}) {
+    const model = process.env.ANTHROPIC_MODEL_PREF || "claude-2";
+    if (!this.isValidChatModel(model))
+      throw new Error(
+        `Anthropic chat: ${model} is not valid for chat completion!`
+      );
+
+    const { content, error } = await this.anthropic.completions
+      .create({
+        model,
+        max_tokens_to_sample: 300,
+        prompt,
+      })
+      .then((res) => {
+        const { completion } = res;
+        const re = new RegExp(
+          "(?:<" + this.answerKey + ">)([\\s\\S]*)(?:</" + this.answerKey + ">)"
+        );
+        const response = completion.match(re)?.[1]?.trim();
+        if (!response)
+          throw new Error("Anthropic: No response could be parsed.");
+        return { content: response, error: null };
+      })
+      .catch((e) => {
+        return { content: null, error: e.message };
+      });
+
+    if (error) throw new Error(error);
+    return content;
+  }
+
+  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+}
+
+module.exports = {
+  AnthropicLLM,
+};
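
Claude's completion endpoint returns free-form text rather than a structured message, which is why the provider brackets the expected answer in a random per-instance tag and regex-extracts it afterward (the "answer parsing" the commit message refers to). A standalone sketch of just that parse step; no API call is made, and the literal answerKey below stands in for the uuid fragment the constructor generates:

```js
// Standalone sketch of the answer parsing used by AnthropicLLM above.
const answerKey = "9f3c1a2b"; // stand-in for v4().split("-")[0]
const completion = `Sure!\n<${answerKey}>Paris is the capital of France.</${answerKey}>`;

const re = new RegExp("(?:<" + answerKey + ">)([\\s\\S]*)(?:</" + answerKey + ">)");
const response = completion.match(re)?.[1]?.trim();
if (!response) throw new Error("Anthropic: No response could be parsed.");
console.log(response); // "Paris is the capital of France."
```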
diff --git a/server/utils/AiProviders/azureOpenAi/index.js b/server/utils/AiProviders/azureOpenAi/index.js
index abb459fb..6c450c5d 100644
--- a/server/utils/AiProviders/azureOpenAi/index.js
+++ b/server/utils/AiProviders/azureOpenAi/index.js
@@ -1,17 +1,18 @@
-const { toChunks } = require("../../helpers");
+const { AzureOpenAiEmbedder } = require("../../EmbeddingEngines/azureOpenAi");
 
-class AzureOpenAi {
+class AzureOpenAiLLM extends AzureOpenAiEmbedder {
   constructor() {
+    super();
     const { OpenAIClient, AzureKeyCredential } = require("@azure/openai");
-    const openai = new OpenAIClient(
+    if (!process.env.AZURE_OPENAI_ENDPOINT)
+      throw new Error("No Azure API endpoint was set.");
+    if (!process.env.AZURE_OPENAI_KEY)
+      throw new Error("No Azure API key was set.");
+
+    this.openai = new OpenAIClient(
       process.env.AZURE_OPENAI_ENDPOINT,
       new AzureKeyCredential(process.env.AZURE_OPENAI_KEY)
     );
-    this.openai = openai;
-
-    // The maximum amount of "inputs" that OpenAI API can process in a single call.
-    // https://learn.microsoft.com/en-us/azure/ai-services/openai/faq#i-am-trying-to-use-embeddings-and-received-the-error--invalidrequesterror--too-many-inputs--the-max-number-of-inputs-is-1---how-do-i-fix-this-:~:text=consisting%20of%20up%20to%2016%20inputs%20per%20API%20request
-    this.embeddingChunkLimit = 16;
   }
 
   isValidChatModel(_modelName = "") {
@@ -21,6 +22,25 @@ class AzureOpenAi {
     return true;
   }
 
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+  }) {
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}
+    Context:
+    ${contextTexts
+      .map((text, i) => {
+        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+      })
+      .join("")}`,
+    };
+    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+  }
+
   async isSafe(_input = "") {
     // Not implemented by Azure OpenAI so must be stubbed
     return { safe: true, reasons: [] };
@@ -75,70 +95,8 @@ class AzureOpenAi {
     if (!data.hasOwnProperty("choices")) return null;
     return data.choices[0].message.content;
   }
-
-  async embedTextInput(textInput) {
-    const result = await this.embedChunks(textInput);
-    return result?.[0] || [];
-  }
-
-  async embedChunks(textChunks = []) {
-    const textEmbeddingModel =
-      process.env.EMBEDDING_MODEL_PREF || "text-embedding-ada-002";
-    if (!textEmbeddingModel)
-      throw new Error(
-        "No EMBEDDING_MODEL_PREF ENV defined. This must be the name of a deployment on your Azure account for an embedding model."
-      );
-
-    // Because there is a limit on how many chunks can be sent at once to Azure OpenAI
-    // we concurrently execute each max batch of text chunks possible.
-    // Refer to constructor embeddingChunkLimit for more info.
-    const embeddingRequests = [];
-    for (const chunk of toChunks(textChunks, this.embeddingChunkLimit)) {
-      embeddingRequests.push(
-        new Promise((resolve) => {
-          this.openai
-            .getEmbeddings(textEmbeddingModel, chunk)
-            .then((res) => {
-              resolve({ data: res.data, error: null });
-            })
-            .catch((e) => {
-              resolve({ data: [], error: e?.error });
-            });
-        })
-      );
-    }
-
-    const { data = [], error = null } = await Promise.all(
-      embeddingRequests
-    ).then((results) => {
-      // If any errors were returned from Azure abort the entire sequence because the embeddings
-      // will be incomplete.
-      const errors = results
-        .filter((res) => !!res.error)
-        .map((res) => res.error)
-        .flat();
-      if (errors.length > 0) {
-        return {
-          data: [],
-          error: `(${errors.length}) Embedding Errors! ${errors
-            .map((error) => `[${error.type}]: ${error.message}`)
-            .join(", ")}`,
-        };
-      }
-      return {
-        data: results.map((res) => res?.data || []).flat(),
-        error: null,
-      };
-    });
-
-    if (!!error) throw new Error(`Azure OpenAI Failed to embed: ${error}`);
-    return data.length > 0 &&
-      data.every((embd) => embd.hasOwnProperty("embedding"))
-      ? 
data.map((embd) => embd.embedding) - : null; - } } module.exports = { - AzureOpenAi, + AzureOpenAiLLM, }; diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js index dc7e47c6..d4d54bd6 100644 --- a/server/utils/AiProviders/openAi/index.js +++ b/server/utils/AiProviders/openAi/index.js @@ -1,16 +1,15 @@ -const { toChunks } = require("../../helpers"); +const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi"); -class OpenAi { +class OpenAiLLM extends OpenAiEmbedder { constructor() { + super(); const { Configuration, OpenAIApi } = require("openai"); + if (!process.env.OPEN_AI_KEY) throw new Error("No OpenAI API key was set."); + const config = new Configuration({ apiKey: process.env.OPEN_AI_KEY, }); - const openai = new OpenAIApi(config); - this.openai = openai; - - // Arbitrary limit to ensure we stay within reasonable POST request size. - this.embeddingChunkLimit = 1_000; + this.openai = new OpenAIApi(config); } isValidChatModel(modelName = "") { @@ -18,6 +17,25 @@ class OpenAi { return validModels.includes(modelName); } + constructPrompt({ + systemPrompt = "", + contextTexts = [], + chatHistory = [], + userPrompt = "", + }) { + const prompt = { + role: "system", + content: `${systemPrompt} + Context: + ${contextTexts + .map((text, i) => { + return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`; + }) + .join("")}`, + }; + return [prompt, ...chatHistory, { role: "user", content: userPrompt }]; + } + async isSafe(input = "") { const { flagged = false, categories = {} } = await this.openai .createModeration({ input }) @@ -97,66 +115,8 @@ class OpenAi { if (!data.hasOwnProperty("choices")) return null; return data.choices[0].message.content; } - - async embedTextInput(textInput) { - const result = await this.embedChunks(textInput); - return result?.[0] || []; - } - - async embedChunks(textChunks = []) { - // Because there is a hard POST limit on how many chunks can be sent at once to OpenAI (~8mb) - // we concurrently execute each max batch of text chunks possible. - // Refer to constructor embeddingChunkLimit for more info. - const embeddingRequests = []; - for (const chunk of toChunks(textChunks, this.embeddingChunkLimit)) { - embeddingRequests.push( - new Promise((resolve) => { - this.openai - .createEmbedding({ - model: "text-embedding-ada-002", - input: chunk, - }) - .then((res) => { - resolve({ data: res.data?.data, error: null }); - }) - .catch((e) => { - resolve({ data: [], error: e?.error }); - }); - }) - ); - } - - const { data = [], error = null } = await Promise.all( - embeddingRequests - ).then((results) => { - // If any errors were returned from OpenAI abort the entire sequence because the embeddings - // will be incomplete. - const errors = results - .filter((res) => !!res.error) - .map((res) => res.error) - .flat(); - if (errors.length > 0) { - return { - data: [], - error: `(${errors.length}) Embedding Errors! ${errors - .map((error) => `[${error.type}]: ${error.message}`) - .join(", ")}`, - }; - } - return { - data: results.map((res) => res?.data || []).flat(), - error: null, - }; - }); - - if (!!error) throw new Error(`OpenAI Failed to embed: ${error}`); - return data.length > 0 && - data.every((embd) => embd.hasOwnProperty("embedding")) - ? 
data.map((embd) => embd.embedding)
-      : null;
-  }
 }
 
 module.exports = {
-  OpenAi,
+  OpenAiLLM,
 };
diff --git a/server/utils/EmbeddingEngines/azureOpenAi/index.js b/server/utils/EmbeddingEngines/azureOpenAi/index.js
new file mode 100644
index 00000000..554538fa
--- /dev/null
+++ b/server/utils/EmbeddingEngines/azureOpenAi/index.js
@@ -0,0 +1,87 @@
+const { toChunks } = require("../../helpers");
+
+class AzureOpenAiEmbedder {
+  constructor() {
+    const { OpenAIClient, AzureKeyCredential } = require("@azure/openai");
+    if (!process.env.AZURE_OPENAI_ENDPOINT)
+      throw new Error("No Azure API endpoint was set.");
+    if (!process.env.AZURE_OPENAI_KEY)
+      throw new Error("No Azure API key was set.");
+
+    const openai = new OpenAIClient(
+      process.env.AZURE_OPENAI_ENDPOINT,
+      new AzureKeyCredential(process.env.AZURE_OPENAI_KEY)
+    );
+    this.openai = openai;
+
+    // The maximum amount of "inputs" that OpenAI API can process in a single call.
+    // https://learn.microsoft.com/en-us/azure/ai-services/openai/faq#i-am-trying-to-use-embeddings-and-received-the-error--invalidrequesterror--too-many-inputs--the-max-number-of-inputs-is-1---how-do-i-fix-this-:~:text=consisting%20of%20up%20to%2016%20inputs%20per%20API%20request
+    this.embeddingChunkLimit = 16;
+  }
+
+  async embedTextInput(textInput) {
+    const result = await this.embedChunks(textInput);
+    return result?.[0] || [];
+  }
+
+  async embedChunks(textChunks = []) {
+    const textEmbeddingModel =
+      process.env.EMBEDDING_MODEL_PREF || "text-embedding-ada-002";
+    if (!textEmbeddingModel)
+      throw new Error(
+        "No EMBEDDING_MODEL_PREF ENV defined. This must be the name of a deployment on your Azure account for an embedding model."
+      );
+
+    // Because there is a limit on how many chunks can be sent at once to Azure OpenAI
+    // we concurrently execute each max batch of text chunks possible.
+    // Refer to constructor embeddingChunkLimit for more info.
+    const embeddingRequests = [];
+    for (const chunk of toChunks(textChunks, this.embeddingChunkLimit)) {
+      embeddingRequests.push(
+        new Promise((resolve) => {
+          this.openai
+            .getEmbeddings(textEmbeddingModel, chunk)
+            .then((res) => {
+              resolve({ data: res.data, error: null });
+            })
+            .catch((e) => {
+              resolve({ data: [], error: e?.error });
+            });
+        })
+      );
+    }
+
+    const { data = [], error = null } = await Promise.all(
+      embeddingRequests
+    ).then((results) => {
+      // If any errors were returned from Azure abort the entire sequence because the embeddings
+      // will be incomplete.
+      const errors = results
+        .filter((res) => !!res.error)
+        .map((res) => res.error)
+        .flat();
+      if (errors.length > 0) {
+        return {
+          data: [],
+          error: `(${errors.length}) Embedding Errors! ${errors
+            .map((error) => `[${error.type}]: ${error.message}`)
+            .join(", ")}`,
+        };
+      }
+      return {
+        data: results.map((res) => res?.data || []).flat(),
+        error: null,
+      };
+    });
+
+    if (!!error) throw new Error(`Azure OpenAI Failed to embed: ${error}`);
+    return data.length > 0 &&
+      data.every((embd) => embd.hasOwnProperty("embedding"))
+      ? data.map((embd) => embd.embedding)
+      : null;
+  }
+}
+
+module.exports = {
+  AzureOpenAiEmbedder,
+};
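
Both embedding engines fan a large set of text chunks out into capped batches and run the requests concurrently; Azure's documented cap is 16 inputs per call, while the OpenAI engine below uses an arbitrary 1,000. A self-contained sketch of that batching arithmetic, reusing the toChunks helper copied from server/utils/helpers:

```js
// Self-contained sketch of the batching both embedders rely on.
// toChunks is copied verbatim from server/utils/helpers in this patch.
function toChunks(arr, size) {
  return Array.from({ length: Math.ceil(arr.length / size) }, (_v, i) =>
    arr.slice(i * size, i * size + size)
  );
}

// 40 text chunks with Azure's limit of 16 => 3 concurrent requests.
const textChunks = Array.from({ length: 40 }, (_, i) => `chunk ${i}`);
const batches = toChunks(textChunks, 16);
console.log(batches.map((b) => b.length)); // [16, 16, 8]
```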
diff --git a/server/utils/EmbeddingEngines/openAi/index.js b/server/utils/EmbeddingEngines/openAi/index.js
new file mode 100644
index 00000000..2c4c8c24
--- /dev/null
+++ b/server/utils/EmbeddingEngines/openAi/index.js
@@ -0,0 +1,78 @@
+const { toChunks } = require("../../helpers");
+
+class OpenAiEmbedder {
+  constructor() {
+    const { Configuration, OpenAIApi } = require("openai");
+    if (!process.env.OPEN_AI_KEY) throw new Error("No OpenAI API key was set.");
+    const config = new Configuration({
+      apiKey: process.env.OPEN_AI_KEY,
+    });
+    const openai = new OpenAIApi(config);
+    this.openai = openai;
+
+    // Arbitrary limit to ensure we stay within reasonable POST request size.
+    this.embeddingChunkLimit = 1_000;
+  }
+
+  async embedTextInput(textInput) {
+    const result = await this.embedChunks(textInput);
+    return result?.[0] || [];
+  }
+
+  async embedChunks(textChunks = []) {
+    // Because there is a hard POST limit on how many chunks can be sent at once to OpenAI (~8mb)
+    // we concurrently execute each max batch of text chunks possible.
+    // Refer to constructor embeddingChunkLimit for more info.
+    const embeddingRequests = [];
+    for (const chunk of toChunks(textChunks, this.embeddingChunkLimit)) {
+      embeddingRequests.push(
+        new Promise((resolve) => {
+          this.openai
+            .createEmbedding({
+              model: "text-embedding-ada-002",
+              input: chunk,
+            })
+            .then((res) => {
+              resolve({ data: res.data?.data, error: null });
+            })
+            .catch((e) => {
+              resolve({ data: [], error: e?.error });
+            });
+        })
+      );
+    }
+
+    const { data = [], error = null } = await Promise.all(
+      embeddingRequests
+    ).then((results) => {
+      // If any errors were returned from OpenAI abort the entire sequence because the embeddings
+      // will be incomplete.
+      const errors = results
+        .filter((res) => !!res.error)
+        .map((res) => res.error)
+        .flat();
+      if (errors.length > 0) {
+        return {
+          data: [],
+          error: `(${errors.length}) Embedding Errors! ${errors
+            .map((error) => `[${error.type}]: ${error.message}`)
+            .join(", ")}`,
+        };
+      }
+      return {
+        data: results.map((res) => res?.data || []).flat(),
+        error: null,
+      };
+    });
+
+    if (!!error) throw new Error(`OpenAI Failed to embed: ${error}`);
+    return data.length > 0 &&
+      data.every((embd) => embd.hasOwnProperty("embedding"))
+      ? data.map((embd) => embd.embedding)
+      : null;
+  }
+}
+
+module.exports = {
+  OpenAiEmbedder,
+};
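
The helpers change below is what wires these embedders to the Anthropic provider. Assuming EMBEDDING_ENGINE and the relevant API keys are set as in the .env examples earlier in this patch, intended server-side usage is roughly:

```js
// Rough usage sketch; require paths assume a script at the server/ root
// and env vars set per the .env examples above.
const { getEmbeddingEngineSelection } = require("./utils/helpers");
const { AnthropicLLM } = require("./utils/AiProviders/anthropic");

const embedder = getEmbeddingEngineSelection(); // OpenAiEmbedder, AzureOpenAiEmbedder, or null
const llm = new AnthropicLLM(embedder); // throws if no embedding engine is configured

// Chat completions go to Claude; vector math is delegated to the embedder.
llm.embedTextInput("hello world").then((vector) => console.log(vector.length));
```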
diff --git a/server/utils/chats/index.js b/server/utils/chats/index.js
index a3a96bc2..3873c3ac 100644
--- a/server/utils/chats/index.js
+++ b/server/utils/chats/index.js
@@ -1,10 +1,8 @@
 const { v4: uuidv4 } = require("uuid");
-const { OpenAi } = require("../AiProviders/openAi");
 const { WorkspaceChats } = require("../../models/workspaceChats");
 const { resetMemory } = require("./commands/reset");
 const moment = require("moment");
 const { getVectorDbClass, getLLMProvider } = require("../helpers");
-const { AzureOpenAi } = require("../AiProviders/azureOpenAi");
 
 function convertToChatHistory(history = []) {
   const formattedHistory = [];
@@ -67,14 +65,14 @@ async function chatWithWorkspace(
   user = null
 ) {
   const uuid = uuidv4();
-  const LLMConnector = getLLMProvider();
-  const VectorDb = getVectorDbClass();
   const command = grepCommand(message);
 
   if (!!command && Object.keys(VALID_COMMANDS).includes(command)) {
     return await VALID_COMMANDS[command](workspace, message, uuid, user);
   }
 
+  const LLMConnector = getLLMProvider();
+  const VectorDb = getVectorDbClass();
   const { safe, reasons = [] } = await LLMConnector.isSafe(message);
   if (!safe) {
     return {
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index b077606a..699ac80f 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -25,16 +25,36 @@ function getLLMProvider() {
   const vectorSelection = process.env.LLM_PROVIDER || "openai";
   switch (vectorSelection) {
     case "openai":
-      const { OpenAi } = require("../AiProviders/openAi");
-      return new OpenAi();
+      const { OpenAiLLM } = require("../AiProviders/openAi");
+      return new OpenAiLLM();
     case "azure":
-      const { AzureOpenAi } = require("../AiProviders/azureOpenAi");
-      return new AzureOpenAi();
+      const { AzureOpenAiLLM } = require("../AiProviders/azureOpenAi");
+      return new AzureOpenAiLLM();
+    case "anthropic":
+      const { AnthropicLLM } = require("../AiProviders/anthropic");
+      const embedder = getEmbeddingEngineSelection();
+      return new AnthropicLLM(embedder);
     default:
       throw new Error("ENV: No LLM_PROVIDER value found in environment!");
   }
 }
 
+function getEmbeddingEngineSelection() {
+  const engineSelection = process.env.EMBEDDING_ENGINE;
+  switch (engineSelection) {
+    case "openai":
+      const { OpenAiEmbedder } = require("../EmbeddingEngines/openAi");
+      return new OpenAiEmbedder();
+    case "azure":
+      const {
+        AzureOpenAiEmbedder,
+      } = require("../EmbeddingEngines/azureOpenAi");
+      return new AzureOpenAiEmbedder();
+    default:
+      return null;
+  }
+}
+
 function toChunks(arr, size) {
   return Array.from({ length: Math.ceil(arr.length / size) }, (_v, i) =>
     arr.slice(i * size, i * size + size)
@@ -42,6 +62,7 @@ function toChunks(arr, size) {
 }
 
 module.exports = {
+  getEmbeddingEngineSelection,
   getVectorDbClass,
   getLLMProvider,
   toChunks,
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 88b07989..3e6d6429 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -30,6 +30,21 @@ const KEY_MAPPING = {
     checks: [isNotEmpty],
   },
 
+  // Anthropic Settings
+  AnthropicApiKey: {
+    envKey: "ANTHROPIC_API_KEY",
+    checks: [isNotEmpty, validAnthropicApiKey],
+  },
+  AnthropicModelPref: {
+    envKey: "ANTHROPIC_MODEL_PREF",
+    checks: [isNotEmpty, validAnthropicModel],
+  },
+
+  EmbeddingEngine: {
+    envKey: "EMBEDDING_ENGINE",
+    checks: [supportedEmbeddingModel],
+  },
+
   // Vector Database Selection Settings
   VectorDB: {
envKey: "VECTOR_DB", @@ -113,8 +128,14 @@ function validOpenAIKey(input = "") { return input.startsWith("sk-") ? null : "OpenAI Key must start with sk-"; } +function validAnthropicApiKey(input = "") { + return input.startsWith("sk-ant-") + ? null + : "Anthropic Key must start with sk-ant-"; +} + function supportedLLM(input = "") { - return ["openai", "azure"].includes(input); + return ["openai", "azure", "anthropic"].includes(input); } function validOpenAIModel(input = "") { @@ -124,6 +145,20 @@ function validOpenAIModel(input = "") { : `Invalid Model type. Must be one of ${validModels.join(", ")}.`; } +function validAnthropicModel(input = "") { + const validModels = ["claude-2"]; + return validModels.includes(input) + ? null + : `Invalid Model type. Must be one of ${validModels.join(", ")}.`; +} + +function supportedEmbeddingModel(input = "") { + const supported = ["openai", "azure"]; + return supported.includes(input) + ? null + : `Invalid Embedding model type. Must be one of ${supported.join(", ")}.`; +} + function supportedVectorDB(input = "") { const supported = ["chroma", "pinecone", "lancedb", "weaviate", "qdrant"]; return supported.includes(input) diff --git a/server/utils/vectorDbProviders/chroma/index.js b/server/utils/vectorDbProviders/chroma/index.js index 8e33b35e..fdc4cbe4 100644 --- a/server/utils/vectorDbProviders/chroma/index.js +++ b/server/utils/vectorDbProviders/chroma/index.js @@ -273,17 +273,11 @@ const Chroma = { namespace, queryVector ); - const prompt = { - role: "system", - content: `${chatPrompt(workspace)} - Context: - ${contextTexts - .map((text, i) => { - return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`; - }) - .join("")}`, - }; - const memory = [prompt, { role: "user", content: input }]; + const memory = LLMConnector.constructPrompt({ + systemPrompt: chatPrompt(workspace), + contextTexts: contextTexts, + userPrompt: input, + }); const responseText = await LLMConnector.getChatCompletion(memory, { temperature: workspace?.openAiTemp ?? 0.7, }); @@ -328,17 +322,12 @@ const Chroma = { namespace, queryVector ); - const prompt = { - role: "system", - content: `${chatPrompt(workspace)} - Context: - ${contextTexts - .map((text, i) => { - return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`; - }) - .join("")}`, - }; - const memory = [prompt, ...chatHistory, { role: "user", content: input }]; + const memory = LLMConnector.constructPrompt({ + systemPrompt: chatPrompt(workspace), + contextTexts: contextTexts, + userPrompt: input, + chatHistory, + }); const responseText = await LLMConnector.getChatCompletion(memory, { temperature: workspace?.openAiTemp ?? 0.7, }); diff --git a/server/utils/vectorDbProviders/lance/index.js b/server/utils/vectorDbProviders/lance/index.js index 9d446058..bb150958 100644 --- a/server/utils/vectorDbProviders/lance/index.js +++ b/server/utils/vectorDbProviders/lance/index.js @@ -246,17 +246,11 @@ const LanceDb = { namespace, queryVector ); - const prompt = { - role: "system", - content: `${chatPrompt(workspace)} - Context: - ${contextTexts - .map((text, i) => { - return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`; - }) - .join("")}`, - }; - const memory = [prompt, { role: "user", content: input }]; + const memory = LLMConnector.constructPrompt({ + systemPrompt: chatPrompt(workspace), + contextTexts: contextTexts, + userPrompt: input, + }); const responseText = await LLMConnector.getChatCompletion(memory, { temperature: workspace?.openAiTemp ?? 
diff --git a/server/utils/vectorDbProviders/chroma/index.js b/server/utils/vectorDbProviders/chroma/index.js
index 8e33b35e..fdc4cbe4 100644
--- a/server/utils/vectorDbProviders/chroma/index.js
+++ b/server/utils/vectorDbProviders/chroma/index.js
@@ -273,17 +273,11 @@ const Chroma = {
       namespace,
       queryVector
     );
-    const prompt = {
-      role: "system",
-      content: `${chatPrompt(workspace)}
-    Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
-    };
-    const memory = [prompt, { role: "user", content: input }];
+    const memory = LLMConnector.constructPrompt({
+      systemPrompt: chatPrompt(workspace),
+      contextTexts: contextTexts,
+      userPrompt: input,
+    });
     const responseText = await LLMConnector.getChatCompletion(memory, {
       temperature: workspace?.openAiTemp ?? 0.7,
     });
@@ -328,17 +322,12 @@ const Chroma = {
       namespace,
       queryVector
     );
-    const prompt = {
-      role: "system",
-      content: `${chatPrompt(workspace)}
-    Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
-    };
-    const memory = [prompt, ...chatHistory, { role: "user", content: input }];
+    const memory = LLMConnector.constructPrompt({
+      systemPrompt: chatPrompt(workspace),
+      contextTexts: contextTexts,
+      userPrompt: input,
+      chatHistory,
+    });
     const responseText = await LLMConnector.getChatCompletion(memory, {
       temperature: workspace?.openAiTemp ?? 0.7,
     });
diff --git a/server/utils/vectorDbProviders/lance/index.js b/server/utils/vectorDbProviders/lance/index.js
index 9d446058..bb150958 100644
--- a/server/utils/vectorDbProviders/lance/index.js
+++ b/server/utils/vectorDbProviders/lance/index.js
@@ -246,17 +246,11 @@ const LanceDb = {
       namespace,
       queryVector
     );
-    const prompt = {
-      role: "system",
-      content: `${chatPrompt(workspace)}
-    Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
-    };
-    const memory = [prompt, { role: "user", content: input }];
+    const memory = LLMConnector.constructPrompt({
+      systemPrompt: chatPrompt(workspace),
+      contextTexts: contextTexts,
+      userPrompt: input,
+    });
     const responseText = await LLMConnector.getChatCompletion(memory, {
       temperature: workspace?.openAiTemp ?? 0.7,
     });
@@ -296,17 +290,12 @@ const LanceDb = {
       namespace,
       queryVector
     );
-    const prompt = {
-      role: "system",
-      content: `${chatPrompt(workspace)}
-    Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
-    };
-    const memory = [prompt, ...chatHistory, { role: "user", content: input }];
+    const memory = LLMConnector.constructPrompt({
+      systemPrompt: chatPrompt(workspace),
+      contextTexts: contextTexts,
+      userPrompt: input,
+      chatHistory,
+    });
     const responseText = await LLMConnector.getChatCompletion(memory, {
       temperature: workspace?.openAiTemp ?? 0.7,
     });
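
Every vector-db provider now delegates prompt assembly to the active LLM connector instead of hard-coding an OpenAI-style system message; the same substitution repeats in the Pinecone, Qdrant, and Weaviate hunks that follow. The sketch below is reconstructed from the code deleted above to show roughly what the OpenAI-flavored connectors centralize. The Anthropic connector can override the same method to emit its own transcript format, which is the point of moving this behind the connector interface:

// Reconstructed from the per-provider code deleted above; illustrative, not
// quoted verbatim from the patch. Returns an OpenAI-style chat message array.
function constructPrompt({
  systemPrompt = "",
  contextTexts = [],
  chatHistory = [],
  userPrompt = "",
}) {
  const prompt = {
    role: "system",
    content: `${systemPrompt}
    Context:
    ${contextTexts
      .map((text, i) => `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`)
      .join("")}`,
  };
  // Query-style calls omit chatHistory; chat-style calls pass it through.
  return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
}
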
diff --git a/server/utils/vectorDbProviders/pinecone/index.js b/server/utils/vectorDbProviders/pinecone/index.js
index 79b0d40b..fc7f4d31 100644
--- a/server/utils/vectorDbProviders/pinecone/index.js
+++ b/server/utils/vectorDbProviders/pinecone/index.js
@@ -242,18 +242,11 @@ const Pinecone = {
       namespace,
       queryVector
     );
-    const prompt = {
-      role: "system",
-      content: `${chatPrompt(workspace)}
-    Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
-    };
-
-    const memory = [prompt, { role: "user", content: input }];
+    const memory = LLMConnector.constructPrompt({
+      systemPrompt: chatPrompt(workspace),
+      contextTexts: contextTexts,
+      userPrompt: input,
+    });
     const responseText = await LLMConnector.getChatCompletion(memory, {
       temperature: workspace?.openAiTemp ?? 0.7,
     });
@@ -290,18 +283,12 @@ const Pinecone = {
       namespace,
       queryVector
     );
-    const prompt = {
-      role: "system",
-      content: `${chatPrompt(workspace)}
-    Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
-    };
-
-    const memory = [prompt, ...chatHistory, { role: "user", content: input }];
+    const memory = LLMConnector.constructPrompt({
+      systemPrompt: chatPrompt(workspace),
+      contextTexts: contextTexts,
+      userPrompt: input,
+      chatHistory,
+    });
     const responseText = await LLMConnector.getChatCompletion(memory, {
       temperature: workspace?.openAiTemp ?? 0.7,
     });
diff --git a/server/utils/vectorDbProviders/qdrant/index.js b/server/utils/vectorDbProviders/qdrant/index.js
index 2ee8e6ed..9925c6e4 100644
--- a/server/utils/vectorDbProviders/qdrant/index.js
+++ b/server/utils/vectorDbProviders/qdrant/index.js
@@ -282,17 +282,11 @@ const QDrant = {
       namespace,
       queryVector
     );
-    const prompt = {
-      role: "system",
-      content: `${chatPrompt(workspace)}
-    Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
-    };
-    const memory = [prompt, { role: "user", content: input }];
+    const memory = LLMConnector.constructPrompt({
+      systemPrompt: chatPrompt(workspace),
+      contextTexts: contextTexts,
+      userPrompt: input,
+    });
     const responseText = await LLMConnector.getChatCompletion(memory, {
       temperature: workspace?.openAiTemp ?? 0.7,
     });
@@ -332,17 +326,12 @@ const QDrant = {
       namespace,
       queryVector
     );
-    const prompt = {
-      role: "system",
-      content: `${chatPrompt(workspace)}
-    Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
-    };
-    const memory = [prompt, ...chatHistory, { role: "user", content: input }];
+    const memory = LLMConnector.constructPrompt({
+      systemPrompt: chatPrompt(workspace),
+      contextTexts: contextTexts,
+      userPrompt: input,
+      chatHistory,
+    });
     const responseText = await LLMConnector.getChatCompletion(memory, {
       temperature: workspace?.openAiTemp ?? 0.7,
     });
diff --git a/server/utils/vectorDbProviders/weaviate/index.js b/server/utils/vectorDbProviders/weaviate/index.js
index 8543db7d..1a43e3c5 100644
--- a/server/utils/vectorDbProviders/weaviate/index.js
+++ b/server/utils/vectorDbProviders/weaviate/index.js
@@ -353,18 +353,11 @@ const Weaviate = {
       namespace,
       queryVector
     );
-
-    const prompt = {
-      role: "system",
-      content: `${chatPrompt(workspace)}
-    Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
-    };
-    const memory = [prompt, { role: "user", content: input }];
+    const memory = LLMConnector.constructPrompt({
+      systemPrompt: chatPrompt(workspace),
+      contextTexts: contextTexts,
+      userPrompt: input,
+    });
     const responseText = await LLMConnector.getChatCompletion(memory, {
       temperature: workspace?.openAiTemp ?? 0.7,
     });
@@ -404,17 +397,12 @@ const Weaviate = {
       namespace,
       queryVector
     );
-    const prompt = {
-      role: "system",
-      content: `${chatPrompt(workspace)}
-    Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
-    };
-    const memory = [prompt, ...chatHistory, { role: "user", content: input }];
+    const memory = LLMConnector.constructPrompt({
+      systemPrompt: chatPrompt(workspace),
+      contextTexts: contextTexts,
+      userPrompt: input,
+      chatHistory,
+    });
     const responseText = await LLMConnector.getChatCompletion(memory, {
       temperature: workspace?.openAiTemp ?? 0.7,
     });
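
The lockfile changes that follow pull in @anthropic-ai/sdk 0.8.1, a pre-Messages-API version of the SDK: chat history must be flattened into a single Human:/Assistant: transcript and sent through the completions endpoint. A hedged sketch of the call shape the new Anthropic connector builds on (the prompt text is illustrative; parameter and constant names are from the 0.8.x SDK):

// Sketch against @anthropic-ai/sdk 0.8.x, the version added below.
const Anthropic = require("@anthropic-ai/sdk");

const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

async function main() {
  const response = await anthropic.completions.create({
    model: "claude-2",
    max_tokens_to_sample: 300,
    // The 0.8.x SDK exports the required turn markers as constants.
    prompt: `${Anthropic.HUMAN_PROMPT} Say hello.${Anthropic.AI_PROMPT}`,
  });
  console.log(response.completion);
}

main();
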
"https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-3.1.0.tgz#92670ff50f5359bdb7a3e0d40d0ec30c5737687a" @@ -444,6 +488,11 @@ balanced-match@^1.0.0: resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== +base-64@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/base-64/-/base-64-0.1.0.tgz#780a99c84e7d600260361511c4877613bf24f6bb" + integrity sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA== + base64-js@^1.3.0, base64-js@^1.3.1, base64-js@^1.5.1: version "1.5.1" resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" @@ -620,6 +669,11 @@ chalk@^2.4.2: escape-string-regexp "^1.0.5" supports-color "^5.3.0" +charenc@0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/charenc/-/charenc-0.0.2.tgz#c0a1d2f3a7092e03774bfa83f14c0fc5790a8667" + integrity sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA== + check-disk-space@^3.4.0: version "3.4.0" resolved "https://registry.yarnpkg.com/check-disk-space/-/check-disk-space-3.4.0.tgz#eb8e69eee7a378fd12e35281b8123a8b4c4a8ff7" @@ -796,6 +850,11 @@ cross-fetch@^3.1.5: dependencies: node-fetch "^2.6.12" +crypt@0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/crypt/-/crypt-0.0.2.tgz#88d7ff7ec0dfb86f713dc87bbb42d044d3e6c41b" + integrity sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow== + debug@2.6.9: version "2.6.9" resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" @@ -857,6 +916,14 @@ detect-libc@^2.0.0: resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-2.0.2.tgz#8ccf2ba9315350e1241b88d0ac3b0e1fbd99605d" integrity sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw== +digest-fetch@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/digest-fetch/-/digest-fetch-1.3.0.tgz#898e69264d00012a23cf26e8a3e40320143fc661" + integrity sha512-CGJuv6iKNM7QyZlM2T3sPAdZWd/p9zQiRNS9G+9COUCwzWFTs0Xp8NF5iePx7wtvhDykReiRRrSeNb4oMmB8lA== + dependencies: + base-64 "^0.1.0" + md5 "^2.3.0" + dotenv@^16.0.3: version "16.3.1" resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.3.1.tgz#369034de7d7e5b120972693352a3bf112172cc3e" @@ -928,6 +995,11 @@ etag@~1.8.1: resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" integrity sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg== +event-target-shim@^5.0.0: + version "5.0.1" + resolved "https://registry.yarnpkg.com/event-target-shim/-/event-target-shim-5.0.1.tgz#5d4d3ebdf9583d63a5333ce2deb7480ab2b05789" + integrity sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ== + eventemitter3@^4.0.4: version "4.0.7" resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.7.tgz#2de9b68f6528d5644ef5c59526a1b4a07306169f" @@ -1050,6 +1122,11 @@ follow-redirects@^1.14.8, follow-redirects@^1.14.9: resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.2.tgz#b460864144ba63f2681096f274c4e57026da2c13" integrity sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA== +form-data-encoder@1.7.2: + 
version "1.7.2" + resolved "https://registry.yarnpkg.com/form-data-encoder/-/form-data-encoder-1.7.2.tgz#1f1ae3dccf58ed4690b86d87e4f57c654fbab040" + integrity sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A== + form-data@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/form-data/-/form-data-3.0.1.tgz#ebd53791b78356a99af9a300d4282c4d5eb9755f" @@ -1068,6 +1145,14 @@ form-data@^4.0.0: combined-stream "^1.0.8" mime-types "^2.1.12" +formdata-node@^4.3.2: + version "4.4.1" + resolved "https://registry.yarnpkg.com/formdata-node/-/formdata-node-4.4.1.tgz#23f6a5cb9cb55315912cbec4ff7b0f59bbd191e2" + integrity sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ== + dependencies: + node-domexception "1.0.0" + web-streams-polyfill "4.0.0-beta.3" + forwarded@0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.2.0.tgz#2269936428aad4c15c7ebe9779a84bf0b2a81811" @@ -1416,6 +1501,11 @@ is-binary-path@~2.1.0: dependencies: binary-extensions "^2.0.0" +is-buffer@~1.1.6: + version "1.1.6" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" + integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w== + is-extglob@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" @@ -1675,6 +1765,15 @@ make-fetch-happen@^9.1.0: socks-proxy-agent "^6.0.0" ssri "^8.0.0" +md5@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/md5/-/md5-2.3.0.tgz#c3da9a6aae3a30b46b7b0c349b87b110dc3bda4f" + integrity sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g== + dependencies: + charenc "0.0.2" + crypt "0.0.2" + is-buffer "~1.1.6" + media-typer@0.3.0: version "0.3.0" resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" @@ -1886,6 +1985,11 @@ node-addon-api@^5.0.0: resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-5.1.0.tgz#49da1ca055e109a23d537e9de43c09cca21eb762" integrity sha512-eh0GgfEkpnoWDq+VY8OyvYhFEzBk6jIYbRKdIlyTiAXIVJ8PyBaKb0rp7oDtoddbdoHWhq8wwr+XZ81F1rpNdA== +node-domexception@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/node-domexception/-/node-domexception-1.0.0.tgz#6888db46a1f71c0b76b3f7555016b63fe64766e5" + integrity sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ== + node-fetch@^2.6.1, node-fetch@^2.6.12, node-fetch@^2.6.7, node-fetch@^2.6.9: version "2.6.12" resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.12.tgz#02eb8e22074018e3d5a83016649d04df0e348fba" @@ -2585,6 +2689,11 @@ undefsafe@^2.0.5: resolved "https://registry.yarnpkg.com/undefsafe/-/undefsafe-2.0.5.tgz#38733b9327bdcd226db889fb723a6efd162e6e2c" integrity sha512-WxONCrssBM8TSPRqN5EmsjVrsv4A8X12J4ArBiiayv3DyyG3ZlIg6yysuuSYdZsVz3TKcTg2fd//Ujd4CHV1iA== +undici-types@~5.26.4: + version "5.26.5" + resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617" + integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA== + undici@^5.22.1: version "5.23.0" resolved "https://registry.yarnpkg.com/undici/-/undici-5.23.0.tgz#e7bdb0ed42cebe7b7aca87ced53e6eaafb8f8ca0" @@ -2668,6 +2777,16 @@ weaviate-ts-client@^1.4.0: 
     isomorphic-fetch "^3.0.0"
     uuid "^9.0.0"
 
+web-streams-polyfill@4.0.0-beta.3:
+  version "4.0.0-beta.3"
+  resolved "https://registry.yarnpkg.com/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz#2898486b74f5156095e473efe989dcf185047a38"
+  integrity sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==
+
+web-streams-polyfill@^3.2.1:
+  version "3.2.1"
+  resolved "https://registry.yarnpkg.com/web-streams-polyfill/-/web-streams-polyfill-3.2.1.tgz#71c2718c52b45fd49dbeee88634b3a60ceab42a6"
+  integrity sha512-e0MO3wdXWKrLbL0DgGnUV7WHVuw9OUvL4hjgnPkIeEvESk74gAITi5G606JtZPp39cd8HA9VQzCIvA49LpPN5Q==
+
 webidl-conversions@^3.0.0:
   version "3.0.1"
   resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871"