From 1f29cec9182b63141207f3cd09d568ac1cc06a8d Mon Sep 17 00:00:00 2001
From: Timothy Carambat
Date: Fri, 4 Aug 2023 14:56:27 -0700
Subject: [PATCH] Multiple LLM Support framework + AzureOpenAI Support (#180)

* Remove LangchainJS for chat support chaining
Implement runtime LLM selection
Implement AzureOpenAI Support for LLM + Embedding
WIP on frontend
Update env to reflect the new fields

* Remove LangchainJS for chat support chaining
Implement runtime LLM selection
Implement AzureOpenAI Support for LLM + Embedding
WIP on frontend
Update env to reflect the new fields

* Replace keys with LLM Selection in settings modal
Enforce checks for new ENVs depending on LLM selection
---
 .vscode/settings.json                         |   5 +
 docker/.env.example                           |  21 +-
 .../components/Modals/Settings/Keys/index.jsx | 220 --------------
 .../Modals/Settings/LLMSelection/index.jsx    | 281 ++++++++++++++++++
 .../Modals/Settings/VectorDbs/index.jsx       |   4 +-
 .../src/components/Modals/Settings/index.jsx  |  25 +-
 frontend/src/media/llmprovider/anthropic.png  | Bin 0 -> 11892 bytes
 frontend/src/media/llmprovider/azure.png      | Bin 0 -> 34705 bytes
 frontend/src/media/llmprovider/openai.png     | Bin 0 -> 22744 bytes
 server/.env.example                           |  21 +-
 server/endpoints/system.js                    |  21 +-
 server/package.json                           |   3 +-
 server/utils/AiProviders/azureOpenAi/index.js |  99 ++++++
 server/utils/AiProviders/openAi/index.js      |   3 +-
 server/utils/chats/index.js                   |  13 +-
 server/utils/helpers/index.js                 |  22 +-
 server/utils/helpers/updateENV.js             |  41 +++
 .../utils/vectorDbProviders/chroma/index.js   |  72 ++---
 server/utils/vectorDbProviders/lance/index.js |  20 +-
 .../utils/vectorDbProviders/pinecone/index.js |  58 ++--
 server/yarn.lock                              | 106 +++++++
 21 files changed, 699 insertions(+), 336 deletions(-)
 create mode 100644 .vscode/settings.json
 delete mode 100644 frontend/src/components/Modals/Settings/Keys/index.jsx
 create mode 100644 frontend/src/components/Modals/Settings/LLMSelection/index.jsx
 create mode 100644 frontend/src/media/llmprovider/anthropic.png
 create mode 100644 frontend/src/media/llmprovider/azure.png
 create mode 100644 frontend/src/media/llmprovider/openai.png
 create mode 100644 server/utils/AiProviders/azureOpenAi/index.js

diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 00000000..450dd779
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,5 @@
+{
+  "cSpell.words": [
+    "openai"
+  ]
+}
\ No newline at end of file
diff --git a/docker/.env.example b/docker/.env.example
index e44acd02..6b9791eb 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -1,8 +1,24 @@
 SERVER_PORT=3001
-OPEN_AI_KEY=
-OPEN_MODEL_PREF='gpt-3.5-turbo'
 CACHE_VECTORS="true"
+# JWT_SECRET="my-random-string-for-seeding" # Only needed if AUTH_TOKEN is set. Please generate random string at least 12 chars long.
+###########################################
+######## LLM API SELECTION ################
+###########################################
+LLM_PROVIDER='openai'
+# OPEN_AI_KEY=
+OPEN_MODEL_PREF='gpt-3.5-turbo'
+
+# LLM_PROVIDER='azure'
+# AZURE_OPENAI_ENDPOINT=
+# AZURE_OPENAI_KEY=
+# OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model.
+# EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
+
+
+###########################################
+######## Vector Database Selection ########
+###########################################
 # Enable all below if you are using vector database: Chroma.
 # VECTOR_DB="chroma"
 # CHROMA_ENDPOINT='http://localhost:8000'
@@ -18,7 +34,6 @@ PINECONE_INDEX=
 # CLOUD DEPLOYMENT VARIABLES ONLY
 # AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting.
-# JWT_SECRET="my-random-string-for-seeding" # Only needed if AUTH_TOKEN is set. Please generate random string at least 12 chars long.
 # NO_DEBUG="true"
 STORAGE_DIR="./server/storage"
 GOOGLE_APIS_KEY=
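For context on how the new `LLM_PROVIDER` switch above is consumed at runtime: the server picks a chat/embedding client from these variables (the diffstat points at `server/utils/helpers` and the new `server/utils/AiProviders/azureOpenAi`, neither shown in this excerpt). The sketch below is illustrative only, not the code from this patch; it assumes the official `openai` v3 and `@azure/openai` Node clients, and the `getLLMProvider()` name and returned shape are assumptions made for the example.

```js
// Illustrative sketch only: not server/utils/helpers or server/utils/AiProviders
// from this PR. getLLMProvider() and the returned shape are assumptions.
const { Configuration, OpenAIApi } = require("openai");
const { OpenAIClient, AzureKeyCredential } = require("@azure/openai");

function getLLMProvider() {
  const provider = process.env.LLM_PROVIDER || "openai";
  switch (provider) {
    case "openai": {
      const config = new Configuration({ apiKey: process.env.OPEN_AI_KEY });
      const client = new OpenAIApi(config);
      return {
        // OPEN_MODEL_PREF is a base model name here, e.g. 'gpt-3.5-turbo'.
        chat: async (messages) => {
          const { data } = await client.createChatCompletion({
            model: process.env.OPEN_MODEL_PREF || "gpt-3.5-turbo",
            messages,
          });
          return data.choices[0].message.content;
        },
      };
    }
    case "azure": {
      const client = new OpenAIClient(
        process.env.AZURE_OPENAI_ENDPOINT,
        new AzureKeyCredential(process.env.AZURE_OPENAI_KEY)
      );
      return {
        // On Azure, OPEN_MODEL_PREF and EMBEDDING_MODEL_PREF are deployment
        // names, not base model names, so they are passed straight through.
        chat: async (messages) => {
          const { choices } = await client.getChatCompletions(
            process.env.OPEN_MODEL_PREF,
            messages
          );
          return choices[0].message.content;
        },
        embed: async (textChunks) => {
          const { data } = await client.getEmbeddings(
            process.env.EMBEDDING_MODEL_PREF,
            textChunks
          );
          return data.map((item) => item.embedding);
        },
      };
    }
    default:
      throw new Error(`Unsupported LLM_PROVIDER: ${provider}`);
  }
}
```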
diff --git a/frontend/src/components/Modals/Settings/Keys/index.jsx b/frontend/src/components/Modals/Settings/Keys/index.jsx
deleted file mode 100644
index 84b75373..00000000
--- a/frontend/src/components/Modals/Settings/Keys/index.jsx
+++ /dev/null
@@ -1,220 +0,0 @@
-import React, { useState } from "react";
-import { AlertCircle, Loader } from "react-feather";
-import System from "../../../../models/system";
-
-const noop = () => false;
-export default function SystemKeys({ hideModal = noop, user, settings = {} }) {
-  const canDebug = settings.MultiUserMode
-    ? settings?.CanDebug && user?.role === "admin"
-    : settings?.CanDebug;
-  function validSettings(settings) {
-    return (
-      settings?.OpenAiKey &&
-      !!settings?.OpenAiModelPref &&
-      !!settings?.VectorDB &&
-      (settings?.VectorDB === "chroma" ? !!settings?.ChromaEndpoint : true) &&
-      (settings?.VectorDB === "pinecone"
-        ? !!settings?.PineConeKey &&
-          !!settings?.PineConeEnvironment &&
-          !!settings?.PineConeIndex
-        : true)
-    );
-  }
-
-  return (
-    {/* Markup not recovered in this excerpt. The removed modal body rendered
-        the copy "These are the credentials and settings for how your
-        AnythingLLM instance will function. It's important these keys are
-        current and correct.", showed the warning "Ensure all fields are green
-        before attempting to use AnythingLLM or it may not function as
-        expected!" whenever validSettings(settings) was false, and listed each
-        configured key through the ShowKey rows below. */}
-  );
-}
-
-function ShowKey({ name, env, value, valid, allowDebug = true }) {
-  const [isValid, setIsValid] = useState(valid);
-  const [debug, setDebug] = useState(false);
-  const [saving, setSaving] = useState(false);
-  const handleSubmit = async (e) => {
-    e.preventDefault();
-    setSaving(true);
-    const data = {};
-    const form = new FormData(e.target);
-    for (var [key, value] of form.entries()) data[key] = value;
-    const { error } = await System.updateSystem(data);
-    if (!!error) {
-      alert(error);
-      setSaving(false);
-      setIsValid(false);
-      return;
-    }
-
-    setSaving(false);
-    setDebug(false);
-    setIsValid(true);
-  };
-
-  if (!isValid) {
-    return (
-      {/* Markup not recovered. This branch rendered the field with the notice
-          "Need setup in .env file." and, when allowDebug was true, a small
-          edit-and-save form with saving/loading states. */}
-    );
-  }
-
-  return (
-    {/* Markup not recovered. This branch rendered the configured value plus
-        the same optional allowDebug edit/save controls. */}
-  );
-}
diff --git a/frontend/src/components/Modals/Settings/LLMSelection/index.jsx b/frontend/src/components/Modals/Settings/LLMSelection/index.jsx
new file mode 100644
index 00000000..94b75ace
--- /dev/null
+++ b/frontend/src/components/Modals/Settings/LLMSelection/index.jsx
@@ -0,0 +1,281 @@
+import React, { useState } from "react";
+import System from "../../../../models/system";
+import OpenAiLogo from "../../../../media/llmprovider/openai.png";
+import AzureOpenAiLogo from "../../../../media/llmprovider/azure.png";
+import AnthropicLogo from "../../../../media/llmprovider/anthropic.png";
+
+const noop = () => false;
+export default function LLMSelection({
+  hideModal = noop,
+  user,
+  settings = {},
+}) {
+  const [hasChanges, setHasChanges] = useState(false);
+  const [llmChoice, setLLMChoice] = useState(settings?.LLMProvider || "openai");
+  const [saving, setSaving] = useState(false);
+  const [error, setError] = useState(null);
+  const canDebug = settings.MultiUserMode
+    ? settings?.CanDebug && user?.role === "admin"
+    : settings?.CanDebug;
+
+  function updateLLMChoice(selection) {
+    if (!canDebug || selection === llmChoice) return false;
+    setHasChanges(true);
+    setLLMChoice(selection);
+  }
+
+  const handleSubmit = async (e) => {
+    e.preventDefault();
+    setSaving(true);
+    setError(null);
+    const data = {};
+    const form = new FormData(e.target);
+    for (var [key, value] of form.entries()) data[key] = value;
+    const { error } = await System.updateSystem(data);
+    setError(error);
+    setSaving(false);
+    setHasChanges(!!error ? true : false);
+  };
+  return (
+    {/* Markup not recovered in this excerpt. The rendered body contains the
+        copy "These are the credentials and settings for your preferred LLM
+        chat & embedding provider. It's important these keys are current and
+        correct or else AnythingLLM will not function properly.", an error
+        banner that prints {error} when System.updateSystem fails, and a form
+        (submitted through handleSubmit, flipping hasChanges via
+        setHasChanges(true) on change) under the heading "LLM providers".
+        The form lists the provider cards built with LLMProviderOption below
+        (the OpenAI, Azure OpenAI, and Anthropic logos imported above), then
+        per-provider fields inferred from the env keys earlier in the patch:
+          {llmChoice === "openai" && ( ... )}  API key and model preference inputs
+          {llmChoice === "azure" && ( ... )}   endpoint, API key, and chat/embedding deployment inputs
+          {llmChoice === "anthropic-claude-2" && ( ... )}  the notice
+            "This provider is unavailable and cannot be used in AnythingLLM
+            currently."
+        followed by the modal's action buttons. */}
+  );
+}
+
+const LLMProviderOption = ({
+  name,
+  link,
+  description,
+  value,
+  image,
+  checked = false,
+  onClick,
+}) => {
+  return (
+    {/* Markup not recovered. Renders a selectable provider card that calls
+        onClick(value) when chosen. */}
+  );
+};
diff --git a/frontend/src/components/Modals/Settings/VectorDbs/index.jsx b/frontend/src/components/Modals/Settings/VectorDbs/index.jsx
index 0c4f5a38..c4ad0aec 100644
--- a/frontend/src/components/Modals/Settings/VectorDbs/index.jsx
+++ b/frontend/src/components/Modals/Settings/VectorDbs/index.jsx
@@ -57,7 +57,7 @@ export default function VectorDBSelection({

-            Vector database provider
+            Vector database providers
@@ -96,7 +96,7 @@ export default function VectorDBSelection({
             Pinecone DB API Key
{/* The rest of the VectorDbs hunk and the file header for the settings modal
    (apparently frontend/src/components/Modals/Settings/index.jsx, per the
    diffstat) were not recovered. The surviving hunk content follows. */}
 const noop = () => false;
 export default function SystemSettingsModal({ hideModal = noop }) {
   const { user } = useUser();
   const [loading, setLoading] = useState(true);
-  const [selectedTab, setSelectedTab] = useState("keys");
+  const [selectedTab, setSelectedTab] = useState("llm");
   const [settings, setSettings] = useState(null);
-  const Component = TABS[selectedTab || "keys"];
+  const Component = TABS[selectedTab || "llm"];
   useEffect(() => {
     async function fetchKeys() {
@@ -87,10 +94,10 @@ function SettingTabs({ selectedTab, changeTab, settings, user }) {
   return (
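The excerpt cuts off inside the settings-modal diff, but the commit message's third bullet, "Enforce checks for new ENVs depending on LLM selection", is worth illustrating since the server-side piece (per the diffstat, `server/utils/helpers/updateENV.js`) is not shown here. The sketch below is illustrative only, not the patch's actual code; it reuses the variable names from the `.env.example` above, while the function name and return shape are assumptions.

```js
// Illustrative sketch only: not the PR's server/utils/helpers/updateENV.js.
// Shows the idea of enforcing required ENVs per selected LLM provider,
// using the variable names from the .env.example in this patch.
const REQUIRED_ENVS = {
  openai: ["OPEN_AI_KEY", "OPEN_MODEL_PREF"],
  azure: [
    "AZURE_OPENAI_ENDPOINT",
    "AZURE_OPENAI_KEY",
    "OPEN_MODEL_PREF", // Azure "deployment" name used for chat
    "EMBEDDING_MODEL_PREF", // Azure "deployment" name used for embeddings
  ],
};

// Returns a list of human-readable problems; an empty list means the current
// environment satisfies the selected provider.
function validateLLMSelection(env = process.env) {
  const provider = env.LLM_PROVIDER || "openai";
  const required = REQUIRED_ENVS[provider];
  if (!required) return [`Unknown LLM_PROVIDER: ${provider}`];
  return required
    .filter((key) => !env[key] || env[key].length === 0)
    .map((key) => `${key} must be set when LLM_PROVIDER='${provider}'.`);
}

module.exports = { validateLLMSelection };
```

A validator shaped like this would typically run when the settings modal posts new values, so a user switching providers is told immediately which keys are still missing rather than hitting a runtime failure during chat.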