From 5c3bb4b8cce9a9ff7addb571f81aad6483dadbdd Mon Sep 17 00:00:00 2001 From: Sean Hatfield Date: Tue, 9 Jan 2024 13:07:09 -0800 Subject: [PATCH 01/41] 532 uiux add slash command modal (#555) * WIP slash commands * add slash command image * WIP slash commands * slash command menu feature complete * move icons to slash command local * update how slash command component works * relint with new linter * Finalize slash command input Change empty workspace text layout Patch dev unmount issues on Chatworkspace/index.jsx --------- Co-authored-by: timothycarambat --- frontend/src/App.jsx | 28 ++++---- .../ChatContainer/ChatHistory/index.jsx | 20 +++--- .../icons/slash-commands-icon.svg | 4 ++ .../PromptInput/SlashCommands/index.jsx | 68 +++++++++++++++++++ .../ChatContainer/PromptInput/index.jsx | 26 ++++--- .../WorkspaceChat/ChatContainer/index.jsx | 26 ++++++- frontend/src/index.css | 14 +++- 7 files changed, 148 insertions(+), 38 deletions(-) create mode 100644 frontend/src/components/WorkspaceChat/ChatContainer/PromptInput/SlashCommands/icons/slash-commands-icon.svg create mode 100644 frontend/src/components/WorkspaceChat/ChatContainer/PromptInput/SlashCommands/index.jsx diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index 8007b5ad1..fa74d434e 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -19,28 +19,28 @@ const AdminInvites = lazy(() => import("@/pages/Admin/Invitations")); const AdminWorkspaces = lazy(() => import("@/pages/Admin/Workspaces")); const AdminSystem = lazy(() => import("@/pages/Admin/System")); const GeneralChats = lazy(() => import("@/pages/GeneralSettings/Chats")); -const GeneralAppearance = lazy(() => - import("@/pages/GeneralSettings/Appearance") +const GeneralAppearance = lazy( + () => import("@/pages/GeneralSettings/Appearance") ); const GeneralApiKeys = lazy(() => import("@/pages/GeneralSettings/ApiKeys")); -const GeneralLLMPreference = lazy(() => - import("@/pages/GeneralSettings/LLMPreference") +const GeneralLLMPreference = lazy( + () => import("@/pages/GeneralSettings/LLMPreference") ); -const GeneralEmbeddingPreference = lazy(() => - import("@/pages/GeneralSettings/EmbeddingPreference") +const GeneralEmbeddingPreference = lazy( + () => import("@/pages/GeneralSettings/EmbeddingPreference") ); -const GeneralVectorDatabase = lazy(() => - import("@/pages/GeneralSettings/VectorDatabase") +const GeneralVectorDatabase = lazy( + () => import("@/pages/GeneralSettings/VectorDatabase") ); -const GeneralExportImport = lazy(() => - import("@/pages/GeneralSettings/ExportImport") +const GeneralExportImport = lazy( + () => import("@/pages/GeneralSettings/ExportImport") ); const GeneralSecurity = lazy(() => import("@/pages/GeneralSettings/Security")); -const DataConnectors = lazy(() => - import("@/pages/GeneralSettings/DataConnectors") +const DataConnectors = lazy( + () => import("@/pages/GeneralSettings/DataConnectors") ); -const DataConnectorSetup = lazy(() => - import("@/pages/GeneralSettings/DataConnectors/Connectors") +const DataConnectorSetup = lazy( + () => import("@/pages/GeneralSettings/DataConnectors/Connectors") ); const OnboardingFlow = lazy(() => import("@/pages/OnboardingFlow")); diff --git a/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/index.jsx b/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/index.jsx index 1de2504b7..4a7cd4827 100644 --- a/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/index.jsx +++ b/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/index.jsx @@ -24,16 +24,14 
@@ export default function ChatHistory({ history = [], workspace }) { }; const debouncedScroll = debounce(handleScroll, 100); - useEffect(() => { - if (!chatHistoryRef.current) return null; - const chatHistoryElement = chatHistoryRef.current; - chatHistoryElement.addEventListener("scroll", debouncedScroll); - - return () => { - chatHistoryElement.removeEventListener("scroll", debouncedScroll); - debouncedScroll.cancel(); - }; + function watchScrollEvent() { + if (!chatHistoryRef.current) return null; + const chatHistoryElement = chatHistoryRef.current; + if (!chatHistoryElement) return null; + chatHistoryElement.addEventListener("scroll", debouncedScroll); + } + watchScrollEvent(); }, []); const scrollToBottom = () => { @@ -49,11 +47,11 @@ export default function ChatHistory({ history = [], workspace }) { return (
-

+

Welcome to your new workspace.

-

+

To get started either{" "} + + + diff --git a/frontend/src/components/WorkspaceChat/ChatContainer/PromptInput/SlashCommands/index.jsx b/frontend/src/components/WorkspaceChat/ChatContainer/PromptInput/SlashCommands/index.jsx new file mode 100644 index 000000000..0e4f26aa9 --- /dev/null +++ b/frontend/src/components/WorkspaceChat/ChatContainer/PromptInput/SlashCommands/index.jsx @@ -0,0 +1,68 @@ +import { useEffect, useRef, useState } from "react"; +import SlashCommandIcon from "./icons/slash-commands-icon.svg"; + +export default function SlashCommandsButton({ showing, setShowSlashCommand }) { + return ( +

setShowSlashCommand(!showing)} + className={`flex justify-center items-center opacity-60 hover:opacity-100 cursor-pointer ${ + showing ? "!opacity-100" : "" + }`} + > + Slash commands button +
+ ); +} + +export function SlashCommands({ showing, setShowing, sendCommand }) { + const cmdRef = useRef(null); + useEffect(() => { + function listenForOutsideClick() { + if (!showing || !cmdRef.current) return false; + document.addEventListener("click", closeIfOutside); + } + listenForOutsideClick(); + }, [showing, cmdRef.current]); + + if (!showing) return null; + const closeIfOutside = ({ target }) => { + if (target.id === "slash-cmd-btn") return; + const isOutside = !cmdRef?.current?.contains(target); + if (!isOutside) return; + setShowing(false); + }; + + return ( +
+
+ +
+
+ ); +} + +export function useSlashCommands() { + const [showSlashCommand, setShowSlashCommand] = useState(false); + return { showSlashCommand, setShowSlashCommand }; +} diff --git a/frontend/src/components/WorkspaceChat/ChatContainer/PromptInput/index.jsx b/frontend/src/components/WorkspaceChat/ChatContainer/PromptInput/index.jsx index 14daff809..e141cc0a4 100644 --- a/frontend/src/components/WorkspaceChat/ChatContainer/PromptInput/index.jsx +++ b/frontend/src/components/WorkspaceChat/ChatContainer/PromptInput/index.jsx @@ -11,6 +11,10 @@ import ManageWorkspace, { useManageWorkspaceModal, } from "../../../Modals/MangeWorkspace"; import useUser from "@/hooks/useUser"; +import SlashCommandsButton, { + SlashCommands, + useSlashCommands, +} from "./SlashCommands"; export default function PromptInput({ workspace, @@ -19,7 +23,9 @@ export default function PromptInput({ onChange, inputDisabled, buttonDisabled, + sendCommand, }) { + const { showSlashCommand, setShowSlashCommand } = useSlashCommands(); const { showing, showModal, hideModal } = useManageWorkspaceModal(); const formRef = useRef(null); const [_, setFocused] = useState(false); @@ -49,7 +55,12 @@ export default function PromptInput({ }; return ( -
+
+
)} - - {/* */} +
- {/* */}
diff --git a/frontend/src/components/WorkspaceChat/ChatContainer/index.jsx b/frontend/src/components/WorkspaceChat/ChatContainer/index.jsx index 34ae8de9a..6dd1cdf50 100644 --- a/frontend/src/components/WorkspaceChat/ChatContainer/index.jsx +++ b/frontend/src/components/WorkspaceChat/ChatContainer/index.jsx @@ -10,7 +10,6 @@ export default function ChatContainer({ workspace, knownHistory = [] }) { const [message, setMessage] = useState(""); const [loadingResponse, setLoadingResponse] = useState(false); const [chatHistory, setChatHistory] = useState(knownHistory); - const handleMessageChange = (event) => { setMessage(event.target.value); }; @@ -36,6 +35,30 @@ export default function ChatContainer({ workspace, knownHistory = [] }) { setLoadingResponse(true); }; + const sendCommand = async (command, submit = false) => { + if (!command || command === "") return false; + if (!submit) { + setMessage(command); + return; + } + + const prevChatHistory = [ + ...chatHistory, + { content: command, role: "user" }, + { + content: "", + role: "assistant", + pending: true, + userMessage: command, + animate: true, + }, + ]; + + setChatHistory(prevChatHistory); + setMessage(""); + setLoadingResponse(true); + }; + useEffect(() => { async function fetchReply() { const promptMessage = @@ -97,6 +120,7 @@ export default function ChatContainer({ workspace, knownHistory = [] }) { onChange={handleMessageChange} inputDisabled={loadingResponse} buttonDisabled={loadingResponse} + sendCommand={sendCommand} />
diff --git a/frontend/src/index.css b/frontend/src/index.css index a7aef9a7e..1d1b2da85 100644 --- a/frontend/src/index.css +++ b/frontend/src/index.css @@ -6,8 +6,18 @@ html, body { padding: 0; margin: 0; - font-family: "plus-jakarta-sans", -apple-system, BlinkMacSystemFont, Segoe UI, - Roboto, Oxygen, Ubuntu, Cantarell, Fira Sans, Droid Sans, Helvetica Neue, + font-family: + "plus-jakarta-sans", + -apple-system, + BlinkMacSystemFont, + Segoe UI, + Roboto, + Oxygen, + Ubuntu, + Cantarell, + Fira Sans, + Droid Sans, + Helvetica Neue, sans-serif; background-color: white; } From fd4a2306692c8359e19b14b4fb8072276c56a44e Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Tue, 9 Jan 2024 14:25:53 -0800 Subject: [PATCH 02/41] Setup issue and PR templates (#559) * Setup issue templates Allow ability to include blank issue resolves #557 todo: PR template * update templates + add PR template * newlines --- .github/ISSUE_TEMPLATE/01_bug.yml | 41 +++++++++++++++++++++ .github/ISSUE_TEMPLATE/02_feature.yml | 18 +++++++++ .github/ISSUE_TEMPLATE/03_documentation.yml | 13 +++++++ .github/ISSUE_TEMPLATE/config.yml | 8 ++++ .github/workflows/build-and-push-image.yaml | 1 + pull_request_template.md | 36 ++++++++++++++++++ 6 files changed, 117 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/01_bug.yml create mode 100644 .github/ISSUE_TEMPLATE/02_feature.yml create mode 100644 .github/ISSUE_TEMPLATE/03_documentation.yml create mode 100644 .github/ISSUE_TEMPLATE/config.yml create mode 100644 pull_request_template.md diff --git a/.github/ISSUE_TEMPLATE/01_bug.yml b/.github/ISSUE_TEMPLATE/01_bug.yml new file mode 100644 index 000000000..043c78523 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/01_bug.yml @@ -0,0 +1,41 @@ +name: 🐛 Bug Report +description: File a bug report for AnythingLLM +title: "[BUG]: " +labels: [possible-bug] +body: + - type: markdown + attributes: + value: Use this template to file a bug report for AnythingLLM. Please be as descriptive as possible to allow everyone to replicate and solve your issue. + + - type: dropdown + id: runtime + attributes: + label: How are you running AnythingLLM? + description: AnythingLLM can be run in many environments, pick the one that best represents where you encounter the bug. + options: + - Docker (local) + - Docker (remote machine) + - Local development + - AnythingLLM desktop app + - Not listed + default: 0 + validations: + required: true + + - type: textarea + id: what-happened + attributes: + label: What happened? + description: Also tell us, what did you expect to happen? + validations: + required: true + + - type: textarea + id: reproduction + attributes: + label: Are there known steps to reproduce? + description: | + Let us know how to reproduce the bug and we may be able to fix it more + quickly. This is not required, but it is helpful. + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/02_feature.yml b/.github/ISSUE_TEMPLATE/02_feature.yml new file mode 100644 index 000000000..7ca0d0c92 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/02_feature.yml @@ -0,0 +1,18 @@ +name: ✨ New Feature suggestion +description: Suggest a new feature for AnythingLLM! +title: "[FEAT]: " +labels: [enhancement, feature-request] +body: + - type: markdown + attributes: + value: | + Share a new idea for a feature or improvement. Be sure to search existing + issues first to avoid duplicates. + - type: textarea + id: description + attributes: + label: What would you like to see? 
+ description: | + Describe the feature and why it would be useful to your use-case as well as others. + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/03_documentation.yml b/.github/ISSUE_TEMPLATE/03_documentation.yml new file mode 100644 index 000000000..55800856c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/03_documentation.yml @@ -0,0 +1,13 @@ +name: 📚 Documentation improvement +title: "[DOCS]: " +description: Report an issue or problem with the documentation. +labels: [documentation] + +body: + - type: textarea + id: description + attributes: + label: Description + description: Describe the issue with the documentation that is giving you trouble or causing confusion. + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..ebf87d274 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: true +contact_links: + - name: 🧑‍🤝‍🧑 Community Discord + url: https://discord.gg/6UyHPeGZAC + about: Interact with the Mintplex Labs community here by asking for help, discussing and more! + - name: 📧 E-mail the team + url: "mailto:team@mintplexlabs.com" + about: Contact the core-team about a question. diff --git a/.github/workflows/build-and-push-image.yaml b/.github/workflows/build-and-push-image.yaml index b8ac6348f..f3100842f 100644 --- a/.github/workflows/build-and-push-image.yaml +++ b/.github/workflows/build-and-push-image.yaml @@ -19,6 +19,7 @@ on: - 'images/*' - '.vscode/*' - '**/.env.example' + - '.github/ISSUE_TEMPLATE/*' jobs: push_multi_platform_to_registries: diff --git a/pull_request_template.md b/pull_request_template.md new file mode 100644 index 000000000..1167880b1 --- /dev/null +++ b/pull_request_template.md @@ -0,0 +1,36 @@ + + ### Pull Request Type + + + +- [ ] ✨ feat +- [ ] 🐛 fix +- [ ] ♻️ refactor +- [ ] 💄 style +- [ ] 🔨 chore +- [ ] 📝 docs + +### Relevant Issues + + + +resolves #xxx + + +### What is in this change? + +Describe the changes in this PR that are impactful to the repo. + + +### Additional Information + +Add any other context about the Pull Request here that was not captured above. + +### Developer Validations + + + +- [ ] I ran `yarn lint` from the root of the repo & committed changes +- [ ] Relevant documentation has been updated +- [ ] I have tested my code functionality +- [ ] Docker build succeeds locally From 964d9a71379f3ac072618ddbe120bc6f42572a60 Mon Sep 17 00:00:00 2001 From: timothycarambat Date: Tue, 9 Jan 2024 14:28:56 -0800 Subject: [PATCH 03/41] update build ignore --- .github/workflows/build-and-push-image.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-and-push-image.yaml b/.github/workflows/build-and-push-image.yaml index f3100842f..17ca5a975 100644 --- a/.github/workflows/build-and-push-image.yaml +++ b/.github/workflows/build-and-push-image.yaml @@ -14,12 +14,12 @@ on: push: branches: ['master'] # master branch only. Do not modify. 
paths-ignore: - - '*.md' + - '**.md' - 'cloud-deployments/*' - - 'images/*' - - '.vscode/*' + - 'images/**/*' + - '.vscode/**/*' - '**/.env.example' - - '.github/ISSUE_TEMPLATE/*' + - '.github/ISSUE_TEMPLATE/**/*' jobs: push_multi_platform_to_registries: From 6a4c99affe932e43f282abcee1b8bee92d31f416 Mon Sep 17 00:00:00 2001 From: timothycarambat Date: Tue, 9 Jan 2024 14:31:02 -0800 Subject: [PATCH 04/41] unquote email in config --- .github/ISSUE_TEMPLATE/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index ebf87d274..1acaaf520 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -4,5 +4,5 @@ contact_links: url: https://discord.gg/6UyHPeGZAC about: Interact with the Mintplex Labs community here by asking for help, discussing and more! - name: 📧 E-mail the team - url: "mailto:team@mintplexlabs.com" + url: mailto:team@mintplexlabs.com about: Contact the core-team about a question. From 4801df08c5e4cd45eca47378c7e6c802237a1e58 Mon Sep 17 00:00:00 2001 From: timothycarambat Date: Tue, 9 Jan 2024 14:31:28 -0800 Subject: [PATCH 05/41] remove broken config link --- .github/ISSUE_TEMPLATE/config.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 1acaaf520..d5485e65d 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -3,6 +3,3 @@ contact_links: - name: 🧑‍🤝‍🧑 Community Discord url: https://discord.gg/6UyHPeGZAC about: Interact with the Mintplex Labs community here by asking for help, discussing and more! - - name: 📧 E-mail the team - url: mailto:team@mintplexlabs.com - about: Contact the core-team about a question. From b8192883c2f4a9f17b01fe059ad64717e9b5981a Mon Sep 17 00:00:00 2001 From: timothycarambat Date: Tue, 9 Jan 2024 18:06:01 -0800 Subject: [PATCH 06/41] fix auto-tag on bug issue yaml --- .github/ISSUE_TEMPLATE/01_bug.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/01_bug.yml b/.github/ISSUE_TEMPLATE/01_bug.yml index 043c78523..5aa940089 100644 --- a/.github/ISSUE_TEMPLATE/01_bug.yml +++ b/.github/ISSUE_TEMPLATE/01_bug.yml @@ -1,7 +1,7 @@ name: 🐛 Bug Report description: File a bug report for AnythingLLM title: "[BUG]: " -labels: [possible-bug] +labels: [possible bug] body: - type: markdown attributes: From 5b2c0ca782769559687d51123d52b30677322d06 Mon Sep 17 00:00:00 2001 From: timothycarambat Date: Tue, 9 Jan 2024 18:07:41 -0800 Subject: [PATCH 07/41] add OnboardAI link --- .github/ISSUE_TEMPLATE/01_bug.yml | 5 ++++- .github/ISSUE_TEMPLATE/02_feature.yml | 4 ++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/01_bug.yml b/.github/ISSUE_TEMPLATE/01_bug.yml index 5aa940089..d1ca5aba9 100644 --- a/.github/ISSUE_TEMPLATE/01_bug.yml +++ b/.github/ISSUE_TEMPLATE/01_bug.yml @@ -5,7 +5,10 @@ labels: [possible bug] body: - type: markdown attributes: - value: Use this template to file a bug report for AnythingLLM. Please be as descriptive as possible to allow everyone to replicate and solve your issue. + value: | + Use this template to file a bug report for AnythingLLM. Please be as descriptive as possible to allow everyone to replicate and solve your issue. + + Want help contributing a PR? Use our repo chatbot by OnboardAI! 
https://app.getonboardai.com/chat/github/mintplex-labs/anything-llm - type: dropdown id: runtime diff --git a/.github/ISSUE_TEMPLATE/02_feature.yml b/.github/ISSUE_TEMPLATE/02_feature.yml index 7ca0d0c92..77fc3c50f 100644 --- a/.github/ISSUE_TEMPLATE/02_feature.yml +++ b/.github/ISSUE_TEMPLATE/02_feature.yml @@ -8,6 +8,10 @@ body: value: | Share a new idea for a feature or improvement. Be sure to search existing issues first to avoid duplicates. + + Want help contributing a PR? Use our repo chatbot by OnboardAI! https://app.getonboardai.com/chat/github/mintplex-labs/anything-llm + + - type: textarea id: description attributes: From 755c10b8ca1a5687902a6597a2dea352620ae88a Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Tue, 9 Jan 2024 19:49:51 -0800 Subject: [PATCH 08/41] [API] patch swagger host to be relative (#563) patch swagger host to be relative --- server/swagger/init.js | 18 ++++++++++++++++-- server/swagger/openapi.json | 2 +- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/server/swagger/init.js b/server/swagger/init.js index c84daf323..b68e3249c 100644 --- a/server/swagger/init.js +++ b/server/swagger/init.js @@ -1,4 +1,6 @@ const swaggerAutogen = require('swagger-autogen')({ openapi: '3.0.0' }); +const fs = require('fs') +const path = require('path') const doc = { info: { @@ -6,6 +8,8 @@ const doc = { title: 'AnythingLLM Developer API', description: 'API endpoints that enable programmatic reading, writing, and updating of your AnythingLLM instance. UI supplied by Swagger.io.', }, + // Swagger-autogen does not allow us to use relative paths as these will resolve to + // http:///api in the openapi.json file, so we need to monkey-patch this post-generation. host: '/api', schemes: ['http'], securityDefinitions: { @@ -25,7 +29,7 @@ const doc = { } }; -const outputFile = './openapi.json'; +const outputFile = path.resolve(__dirname, './openapi.json'); const endpointsFiles = [ '../endpoints/api/auth/index.js', '../endpoints/api/admin/index.js', @@ -34,4 +38,14 @@ const endpointsFiles = [ '../endpoints/api/system/index.js', ]; -swaggerAutogen(outputFile, endpointsFiles, doc) \ No newline at end of file +swaggerAutogen(outputFile, endpointsFiles, doc) + .then(({ data }) => { + const openApiSpec = { + ...data, + servers: [{ + url: "/api" + }] + } + fs.writeFileSync(outputFile, JSON.stringify(openApiSpec, null, 2), { encoding: 'utf-8', flag: 'w' }); + console.log(`Swagger-autogen: \x1b[32mPatched servers.url ✔\x1b[0m`) + }) \ No newline at end of file diff --git a/server/swagger/openapi.json b/server/swagger/openapi.json index cb065522e..184723ed7 100644 --- a/server/swagger/openapi.json +++ b/server/swagger/openapi.json @@ -7,7 +7,7 @@ }, "servers": [ { - "url": "http:///api/" + "url": "/api" } ], "paths": { From 259079ac58fdeb55c46500921b778a93af48560a Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Tue, 9 Jan 2024 21:52:50 -0800 Subject: [PATCH 09/41] 561 relative api docs url (#564) * patch swagger host to be relative * change tag on feature request template --- .github/ISSUE_TEMPLATE/02_feature.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/02_feature.yml b/.github/ISSUE_TEMPLATE/02_feature.yml index 77fc3c50f..0fd29cbfb 100644 --- a/.github/ISSUE_TEMPLATE/02_feature.yml +++ b/.github/ISSUE_TEMPLATE/02_feature.yml @@ -1,7 +1,7 @@ name: ✨ New Feature suggestion description: Suggest a new feature for AnythingLLM! 
title: "[FEAT]: " -labels: [enhancement, feature-request] +labels: [enhancement, feature request] body: - type: markdown attributes: From 8cd3a92c660b202655d99bee90b2864694c99946 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Wed, 10 Jan 2024 08:42:03 -0800 Subject: [PATCH 10/41] [BUG] Fixed mass_assignment vuln (#566) Fixed mass_assignment vuln Co-authored-by: dastaj <78434825+dastaj@users.noreply.github.com> --- server/endpoints/invite.js | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/server/endpoints/invite.js b/server/endpoints/invite.js index 08f9a14e9..c5c344510 100644 --- a/server/endpoints/invite.js +++ b/server/endpoints/invite.js @@ -33,7 +33,7 @@ function inviteEndpoints(app) { app.post("/invite/:code", async (request, response) => { try { const { code } = request.params; - const userParams = reqBody(request); + const { username, password } = reqBody(request); const invite = await Invite.get({ code }); if (!invite || invite.status !== "pending") { response @@ -42,7 +42,11 @@ function inviteEndpoints(app) { return; } - const { user, error } = await User.create(userParams); + const { user, error } = await User.create(({ + username, + password, + role: "default", + })); if (!user) { console.error("Accepting invite:", error); response From 1d39b8a2ceca4bb62362eb5b7ad8f66750a317a5 Mon Sep 17 00:00:00 2001 From: Sean Hatfield Date: Wed, 10 Jan 2024 12:35:30 -0800 Subject: [PATCH 11/41] add Together AI LLM support (#560) * add Together AI LLM support * update readme to support together ai * Patch togetherAI implementation * add model sorting/option labels by organization for model selection * linting + add data handling for TogetherAI * change truthy statement patch validLLMSelection method --------- Co-authored-by: timothycarambat --- README.md | 31 ++- docker/.env.example | 4 + .../LLMSelection/TogetherAiOptions/index.jsx | 95 ++++++++ frontend/src/media/llmprovider/togetherai.png | Bin 0 -> 8356 bytes .../GeneralSettings/LLMPreference/index.jsx | 9 + .../Steps/DataHandling/index.jsx | 15 +- .../Steps/LLMPreference/index.jsx | 9 + server/.env.example | 4 + server/models/systemSettings.js | 18 ++ server/utils/AiProviders/togetherAi/index.js | 198 +++++++++++++++ server/utils/AiProviders/togetherAi/models.js | 226 ++++++++++++++++++ .../AiProviders/togetherAi/scripts/.gitignore | 1 + .../togetherAi/scripts/chat_models.txt | 39 +++ .../AiProviders/togetherAi/scripts/parse.mjs | 41 ++++ server/utils/chats/stream.js | 90 +++++++ server/utils/helpers/customModels.js | 26 +- server/utils/helpers/index.js | 3 + server/utils/helpers/updateENV.js | 14 +- 18 files changed, 809 insertions(+), 14 deletions(-) create mode 100644 frontend/src/components/LLMSelection/TogetherAiOptions/index.jsx create mode 100644 frontend/src/media/llmprovider/togetherai.png create mode 100644 server/utils/AiProviders/togetherAi/index.js create mode 100644 server/utils/AiProviders/togetherAi/models.js create mode 100644 server/utils/AiProviders/togetherAi/scripts/.gitignore create mode 100644 server/utils/AiProviders/togetherAi/scripts/chat_models.txt create mode 100644 server/utils/AiProviders/togetherAi/scripts/parse.mjs diff --git a/README.md b/README.md index 62d58d870..5af9278b4 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ +

AnythingLLM logo

@@ -38,13 +39,14 @@ A full-stack application that enables you to turn any document, resource, or pie - ### Product Overview + AnythingLLM is a full-stack application where you can use commercial off-the-shelf LLMs or popular open source LLMs and vectorDB solutions to build a private ChatGPT with no compromises that you can run locally as well as host remotely and be able to chat intelligently with any documents you provide it. AnythingLLM divides your documents into objects called `workspaces`. A Workspace functions a lot like a thread, but with the addition of containerization of your documents. Workspaces can share documents, but they do not talk to each other so you can keep your context for each workspace clean. Some cool features of AnythingLLM + - **Multi-user instance support and permissioning** - Multiple document type support (PDF, TXT, DOCX, etc) - Manage documents in your vector database from a simple UI @@ -57,7 +59,9 @@ Some cool features of AnythingLLM - Full Developer API for custom integrations! ### Supported LLMs, Embedders, and Vector Databases + **Supported LLMs:** + - [Any open-source llama.cpp compatible model](/server/storage/models/README.md#text-generation-llm-selection) - [OpenAI](https://openai.com) - [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) @@ -66,8 +70,10 @@ Some cool features of AnythingLLM - [Ollama (chat models)](https://ollama.ai/) - [LM Studio (all models)](https://lmstudio.ai) - [LocalAi (all models)](https://localai.io/) +- [Together AI (chat models)](https://www.together.ai/) **Supported Embedding models:** + - [AnythingLLM Native Embedder](/server/storage/models/README.md) (default) - [OpenAI](https://openai.com) - [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) @@ -75,42 +81,43 @@ Some cool features of AnythingLLM - [LocalAi (all)](https://localai.io/) **Supported Vector Databases:** + - [LanceDB](https://github.com/lancedb/lancedb) (default) - [Pinecone](https://pinecone.io) - [Chroma](https://trychroma.com) - [Weaviate](https://weaviate.io) - [QDrant](https://qdrant.tech) - ### Technical Overview + This monorepo consists of three main sections: + - `frontend`: A viteJS + React frontend that you can run to easily create and manage all your content the LLM can use. - `server`: A NodeJS express server to handle all the interactions and do all the vectorDB management and LLM interactions. - `docker`: Docker instructions and build process + information for building from source. - `collector`: NodeJS express server that process and parses documents from the UI. ## 🛳 Self Hosting -Mintplex Labs & the community maintain a number of deployment methods, scripts, and templates that you can use to run AnythingLLM locally. Refer to the table below to read how to deploy on your preferred environment or to automatically deploy. -| Docker | AWS | GCP | Digital Ocean | Render.com | -|----------------------------------------|----:|-----|---------------|------------| -| [![Deploy on Docker][docker-btn]][docker-deploy] | [![Deploy on AWS][aws-btn]][aws-deploy] | [![Deploy on GCP][gcp-btn]][gcp-deploy] | [![Deploy on DigitalOcean][do-btn]][aws-deploy] | [![Deploy on Render.com][render-btn]][render-deploy] | +Mintplex Labs & the community maintain a number of deployment methods, scripts, and templates that you can use to run AnythingLLM locally. Refer to the table below to read how to deploy on your preferred environment or to automatically deploy. 
+| Docker | AWS | GCP | Digital Ocean | Render.com | +|----------------------------------------|----:|-----|---------------|------------| +| [![Deploy on Docker][docker-btn]][docker-deploy] | [![Deploy on AWS][aws-btn]][aws-deploy] | [![Deploy on GCP][gcp-btn]][gcp-deploy] | [![Deploy on DigitalOcean][do-btn]][aws-deploy] | [![Deploy on Render.com][render-btn]][render-deploy] | ## How to setup for development + - `yarn setup` To fill in the required `.env` files you'll need in each of the application sections (from root of repo). - Go fill those out before proceeding. Ensure `server/.env.development` is filled or else things won't work right. - `yarn dev:server` To boot the server locally (from root of repo). - `yarn dev:frontend` To boot the frontend locally (from root of repo). - `yarn dev:collector` To then run the document collector (from root of repo). - - - [Learn about documents](./server/storage/documents/DOCUMENTS.md) [Learn about vector caching](./server/storage/vector-cache/VECTOR_CACHE.md) ## Contributing + - create issue - create PR with branch name format of `-` - yee haw let's merge @@ -119,12 +126,15 @@ Mintplex Labs & the community maintain a number of deployment methods, scripts, Telemetry for AnythingLLM ## Telemetry + AnythingLLM by Mintplex Labs Inc contains a telemetry feature that collects anonymous usage information. ### Why? + We use this information to help us understand how AnythingLLM is used, to help us prioritize work on new features and bug fixes, and to help us improve AnythingLLM's performance and stability. ### Opting out + Set `DISABLE_TELEMETRY` in your server or docker .env settings to "true" to opt out of telemetry. ``` @@ -132,7 +142,9 @@ DISABLE_TELEMETRY="true" ``` ### What do you explicitly track? + We will only track usage details that help us make product and roadmap decisions, specifically: + - Version of your installation - When a document is added or removed. No information _about_ the document. Just that the event occurred. This gives us an idea of use. - Type of vector database in use. Let's us know which vector database provider is the most used to prioritize changes when updates arrive for that provider. @@ -160,6 +172,7 @@ Copyright © 2023 [Mintplex Labs][profile-link].
This project is [MIT](./LICENSE) licensed. + [back-to-top]: https://img.shields.io/badge/-BACK_TO_TOP-222628?style=flat-square [profile-link]: https://github.com/mintplex-labs [vector-admin]: https://github.com/mintplex-labs/vector-admin diff --git a/docker/.env.example b/docker/.env.example index 9b2b24c3f..2f8b2ff35 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -40,6 +40,10 @@ GID='1000' # OLLAMA_MODEL_PREF='llama2' # OLLAMA_MODEL_TOKEN_LIMIT=4096 +# LLM_PROVIDER='togetherai' +# TOGETHER_AI_API_KEY='my-together-ai-key' +# TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1' + ########################################### ######## Embedding API SElECTION ########## ########################################### diff --git a/frontend/src/components/LLMSelection/TogetherAiOptions/index.jsx b/frontend/src/components/LLMSelection/TogetherAiOptions/index.jsx new file mode 100644 index 000000000..e526b3afe --- /dev/null +++ b/frontend/src/components/LLMSelection/TogetherAiOptions/index.jsx @@ -0,0 +1,95 @@ +import System from "@/models/system"; +import { useState, useEffect } from "react"; + +export default function TogetherAiOptions({ settings }) { + return ( +
+
+ + +
+ +
+ ); +} +function TogetherAiModelSelection({ settings }) { + const [groupedModels, setGroupedModels] = useState({}); + const [loading, setLoading] = useState(true); + + useEffect(() => { + async function findCustomModels() { + setLoading(true); + const { models } = await System.customModels("togetherai"); + + if (models?.length > 0) { + const modelsByOrganization = models.reduce((acc, model) => { + acc[model.organization] = acc[model.organization] || []; + acc[model.organization].push(model); + return acc; + }, {}); + + setGroupedModels(modelsByOrganization); + } + + setLoading(false); + } + findCustomModels(); + }, []); + + if (loading || Object.keys(groupedModels).length === 0) { + return ( +
+ + +
+ ); + } + + return ( +
+ + +
+ ); +} diff --git a/frontend/src/media/llmprovider/togetherai.png b/frontend/src/media/llmprovider/togetherai.png new file mode 100644 index 0000000000000000000000000000000000000000..95e796a6b8209b7b932cf93751e4dce33b9729d1 GIT binary patch literal 8356 zcmeHN_fwNyx21?65fBxW{t6-j0i=sGQFr`}ocMAMV_l?}ukH$>ijm*=O&y*IGNDUg)UN(X!H#k&)49s4MG}k&%D- z`_NDTpD;{ir2;SHp89If$j7?DA;6n!uhdOF$;d#r{yyYnDQS1f$o|pLP*yU0leRf^ z9KtkI-?_V=IqCWNfd1V98AaCntoOJ1xhW{JU^n2|6y$m-12GcM5dzwIQFJr{{8#SZ zD0-T!to~(Ety+4yzt=3_sDBzmnR)DdNs+AJ+*~od!B6U;CgUX@qVJHAU2vnxzAIUi zzj%+K_;xjv>M0!s=q>9NW->_{FqzX0DB0jY|NX*$1(Eox(orU zH~jc?GXg_;8V=V6g^n@{1Q3EVq`cs6wFJ_|KJFv#Bj(v+U#htDatLwre^=NL%KL{( zE!%$N6dR%{1^D^r_qowG{H_cq@Q8@$jvHj0*PAT~u&}VirE`Rab@8Ax*wyH{gipOh z@ms;}J&9fYF~E;9LUm&hSd*_uRf!{=pFeGEWV1nix2LsrA^YHhkW0y;3f0kEmhC#M zn7DY5PAJCTaz3*#^DKtH8nDX!V*{1+$EeF+yJfhmP-KlV&={8N_f1k@nZ# zhSs>x#B4PFXA#X%X?jDw&zgCu*Xq-@A|;oNqTJlZ#>T%Pd3P%`?9t-#vV)ynpy$r) zk43vxK2^Bz73MS=uw40nmcOz4&kj3X+~`?l>$y0lIZXW?DP3Jx+GsNTaLsD>=70#u zcWbh{tLxn@nyBu@7w`~Mq9!LND+dQ9 zi%oRz9{aQ?1j+iYJQUxY+W)23rq-J&yWQa4(%`r6FgNEuSvyx(J~lRXyfp=a50?i4 z5p0>Pa(0;i9M=|p?DxaO8WG^Nj(lXNK3@r6A1O(2<%X#unnpd) zmLHjqRV2RB!iQK-H3gU$J5Hrk-J0Q4{1!qgO@HF+G-Ekzf6%2D#Vjm$u@GD1HcbmZ z|Jrp`#&2(tus(wOs$e0N*vpNcvZIVqzx&|Oairfu-{{$;4YtxTtYNsDWM^-$;fyuOe%Zq zae^MCB_;ILd1F06%1$#(%yXC`gB*uB{bGM_gI_|7`<1q{Pfv*r+ZVm+)qi3(C+uO= z!5USG0!#w*tdC|f#7knV4M+wPOk6GC6lr`>o%p)>m+ec|A$PO@f8p$dM9~~ozfu&t z6fyQjauqv9EE4qxPa?+JBd|%rJZi-JXtv8qDjuWZABI~2P84~I_+N4(FAs~1V&G@j zQ3sWkN$1V?i*3i^UCpW|8v=CobY$WS*rZ*)(CKGsnPOEFv;{PTQjfdhc*+ZdbOJZn z-2L5d@jK!&gya^J#T`4EiK;`I2oTPYLSo~P_7l5^K3Bc(rqc`!RjPcoPx^5|qafj? zBA?+{K?clv{M>-2nVXRw^=*Rt-?0|=%phj{WIJi;SZD6kA(9>Bj>tX2m&C-xY*hM& zBAn`EY$>#fsGq42I^)X=m5+#%io)40L~OSt9p&)y$}`Uv-o2K?6>{=3xDsMe^RdCD zJNs7*{3;z7&ME?{KS?Gt@%|S)n|7E2&7)es9s)q^R( z@HDl2p|5LIDOyEmJ?F3(Y~>8J)^Jox`F^CD-Ow)SaGKrulrN%IBjbaBQ7_QF??(yL~$dIp84 z!P#kfD=#abA77YzTB$&}nM~8{yf(JZLq`_|#mhn)+FNYWZt4rIgKGO$;q;u`(X2RA zxmiq#Y(PIy$~z8&B@mNL`G4V~nq26Q>%7Ulo1k;V2gYk{S~(D1;|>nFS6}Rz_2|Hb zX70^L{-bdgwS#4BsR)F93S;Qf+{x}jew)JQy|b^PL=pJr!Ja?u-ZLaTU@utD08W8E+mt!7`Y2I@; zXNs!e&GE)uxC$d*Q!2Q-rkV!#Q_cO_2l-aVwBc4iTApJ4As<&Rh zZcm$45*huE{97MfCq(igtUNIYuC4fypB`LD8y2-xR5VE0i3y!AW!I;gsP>z-q;fV4 zFlOb!B83sdzw0HP^LS^nH4M&^&6%wi!s(8N;*7sG9Z~1=&^fSZ`K0zr+jQONjuGS4 zJd?SYht-JooedTm#8@TLCehmW1WPy@FR$XMf?%eGEcUInHQK&<8gV@PJqW=V77csv z4GIH>!AfkrajBOU-qD)M$9$bxh`M<%vhvg7Y@uG2*>B{Z&#QQ$2AA_{S0w*nv4M%1 z{6GVLt=F6N<&w%l!__CI!UtYXGd~Bjs;?kdTkYp=Hepy5-O}K)iNcmIS7z_&!)SFz zJ$xDgJm34Xc&FhQ4R)QGZjvYDJx2H2rwf_`z{Q8}Q8X#b0D$gwjsN|(U^cyMCHg54 z0Vmth)7>3cFWC9k-dv`5R5pCz{*O$b#oJB3#~Cvrq^2F{P3}Ve!fmaT`;u*zR=i+2 zf*{*miF9X5htuS&48Cp)r$|b4wXAUv!q;vI59L9U*c&i|`p(RE2zA)Wnu(KZD6^sB zGOLEI`Cm~~WA`GWH#(OfNg7k=&ky%f#R<}0_PVb=%0^Ak*P-OPWY*`#yh-}IY#$gEc2v`z)p`l!VnQkY@YZA;js zDO=#yBeG{!gE!ZVAcu%s`Ikmmk40=ZUjLlbFglig>Nq@7YU&^)>)x~2eKd1IzcJ&! 
z?e>)dl5L#7EfaZQKUs1=!YY_JHdu}L`sG7;Ea#By6y*{kSR}QaheTuuM8*^y{MKdh z4+DCk_^%HV0~`b<=`pK(u6GUIt80EDOY62Bqa&5yx=fV^Hls>lxD5Gql0w-IyNpM> z*P+u6MoG!tDFGo_@~c{;`4xk%^`$LlbrfzdLitNTtw2zk*98*i?l(MMob7!6`6^Ne{GL`w8aCONEs^D2Ho=72Ei9o zR7r0#I(2Vc8CC=zqp!cIYr%;l(uTE~#8>q=_7d`U_gDL3`R zdp)V%r$MDkyQjF_h%PajAZ(xU^Yd@H!>&fMCmg-b;eZ;<1SqcDF0ZIqACa^ax2y{c z3rIwvz4XrTzfz0eoe<#7g0lvL~Zo8 zD`!>&dtK~dMe;e9Cfu8rI5RM36C+kZ_N?+NLP_*iL8p0zAtq%y7RR8Q39%1gPFV}0 zqO!?}@Z*51$b)6A3i?$9(bEwpCDhWS#yo^6GHR;dUdpsh=}PQQm}*NkD<7z;oLt#< zILkww7>I72M{kBQ%@lc8ikPUh?Jjg`C%x#5=2-u`ZEB^WcFu%5!83vQZdEw6oEMT; zkS7yf6UbR^({y&P&+JJo&e*TR(OQGncXhP8GL3Hle~CNZKU~)?M*A&1z<$)DLuE|W zpQn9XdiL3_-qsqfoFrg8={Y+(?j6`KI$QN8M|^Fj1EX@#Z?jdmy}m}Y_8 z0eg6^n*@G+{Q{sS`Js*>CpPN)2-O7kH3iD`-ehs4tRjz5INyCJ^W&n>B@%)MR`cNo zw83X9@<=ry{$R{8=G2U$`1ynnV+bScjoVaXl|6pBpW}%pq~)V7m=UU)By1rTEx0-K zVkRKTy#%&J*cg}Raz|B_vNgOHn`&93M*;NuTTLo+)89g|hv%?Dx%yI;Jh|^tA(b^M zrOiH%SOg$7c`YEB&aCn`jevZBkD>hjWM(qlrocX2XupQ9H4-qqZfgTJX=iVNLiqXz z*boUu-z$*SHH6&BP(+}F47cv)c$9>c5QJ?A6Ti?&P<}>>`WDNgmJ2pVV??bW*7B1{ zN?4Z5uDP}&$E&RmuORg_H6!8X@Uie8j|NyowSVv0ZXT|W2r;}ntvl@hc~ErA*V=T8 zGSb>FZw8CZ3sRn5<bIs@Vrsiqz*~9hWuqW$ByRPzlOZ2Gimh&xg_BYS1I9yE4D6oc;Wew;n z#=q6pa|i$%2_DcK-$V2u+;6Z6RyC>M9jNkNzbDkLY=M<1*Ug7z@U;M+Zmms&`+fiY znkQRJuGz>LkIB&Gg95rKWgf=??TnAMXjMIAL%}MfxK*BjkW|BWp&-T6c8OO&tK+Gf z>Q9qd_*%w`u6CoZ%gpRq zM}zRjSH>s=4Bs|7d*hYfnc~*JB)xA2^?WcehNo zix;6qIbn5c&1Y?pAMYzT8Opoa>h_(ovjw+n?f3J zQa(G1^h0N9JMp)~f8KP_8E))r($-t4R_T(@_0m&+55dtuz!7TUYUGX?~dQORw z+oARqbNe{$oRPq4fAyKhi?iY~@JRTiP!dlBL#hMEt)4#RmxDWuL`ulPI(|f=G1C`& zJG}!_@4KC`+Vp+^O0ytko5@*)seeUmT=fW;kirEt2jlwoUfc<__asirVC}&qU5bgB z;n_M<``g%K>h+Q6LqA6C{tIO6d%vs5)hv5{fMg!j^{8ZtqLp!OQ`&81u!`aBo`@Aa z5GsLBH)T3?955PWY662IWnPa;sWt`={1_t`_ zo#lzPIqipNDElZCY>F`eSDqN+~hyjDM@7vaPmOQ z!;pueya8hpfvHF#5zEFE$9%w+$Y2xNnfE*1`2#{tHeQsq;J6fcS{7C=e0cV)&L4&B zPhX^2TWQP0Ww`kl`j&CZYb%1DKrO{D7q22E%rCWU0R529uWm52FaC%gY1xzrfJ}-d z8Mn~mb4ogoo^o@l7NA=#F}+1%;-{lm!zT4r>Df3BqIt!__!Ib2nH4tfoJhh4AFsf| zDLFN*yB$QEDqcv1vBQ<{#?`6pw)x5a#PPd%n0V`yiYF~Lq>HmlB0rsFf98%xaE3P_ zlXz%i5T#IU?5b#$jj#b*x%>4}3d+h(R4(gBylYOCBN5v$`MwqHlOd{|L{A$xw^Bnl zh#ktJsY9YPEe6c6azzuk&h63Wsa*w)PesveQUDcISfMn}2<25sk;FloMasy#_vKyEEBjyHKCvh|>?s@Oz2kXW}=WD4Zl`{vS-12J~~@|a*&QwBkFF6-cT zvpxx=1pCbb?F6Zh=iPoX{RPd(&GwyrT4{5pM%j!|lMtM{9EPWaLZv>UC3!n1-l!t* zSvH`306R>3K{MpxirS`K&=ik?LMX3R$-&oiX_W8gpy`t!kN%kk$FfRIxfy@(lrSFV z42tx6)Y-hdJlqG1;pKw}&Q>r9HUJ9VKL_?ree=5inB~o%dtF|CqWIB!+rH}BB6^_2 zo+Ux`OhdDe`c!#=bD2e4*L!$wqNt3wRU$HDpT7s@>7ql_MmhYzSxE zk>q@7J=S8$IPFtmE)cD1?KW+Hofdn+^7ay6z4LKq^`ZkoMeFyVWk)zYuwP?9LmPpsWm3SI2-yO|KwpXw~tnI5X(W zy><;1b7zC#L%%-@e9OR={TcGkbxvCkf!cj91YgoTFKi=Z$=dxc&IEXExY|JGr>gq{Sc`|bC zd3bG$vjtRwVtxGZLbdFJAD!USqO_%mQ#nIaI0Vn`yWOUi`E1`g+G1{13z(pDzB9zb)bU@SJ6b01fDB{x zYECZDg=MtUn^BmoJim-2bkMN{>X80i2fqeV0za{Pd0RuUutq85cH}_WhixIztHYC$ zr@^c}5g`|%jSv=(cUk~L@Gh3${uO|t5G0XU)q&6{n3wky&Z<}*iU?^b0_;m&|D}6IOfr0j8^cosI(9u;RI1Erg^5gvpv$P@ zNf0z_b>hzXf5$R00NsesDX1`B$ zzdBd=YE$L?YhnjOKL6|5;2YZM5+mGi+(JnV>Ou$zrJ70QXR7TBFx9NqvJ0CEpi*HtP2W+ombtyRs}eWSc*HS+ABW@nvzjbo-+!k zbJP`b)PMPOwhOr~gD?ObEs?W}-3c&h_tQ7i7l4KQ37-#OnW*nk8qOH?*LPG^?MRpP zz0lQWbxRVlng%dR>z+;57oE)s2phDYtX$ikLE~dr`x2PXqA22gH~ud>phv74Tn0bO zf7^~!CS`|#)b3vo$ns3WoZm~8Z~_-7!=LG7Jl2qMFmrNJhYfH8mcLK3;ZhigSzW4| z0&9#g9ssmM^JEpoCz*iP8!59=w4EjCr{tEvxdDe61Q*IJ>awX0nNcu3v3VZhv-Mj~ zK*PF0qnrU?-DwxOMe3AtgHfsFvRWY1_6ilu=44AL)S{l%!;t9#gxlL;Yhz5`@kzBa zpR>^VG~n?RDz@Fa$sBm`!%(j?@5xwehM>3bdYWus9zUTeHG8M`l#&j`hX*Vn?SiTm z`9zQ$k_Po%d}`P*$EnJ;yJI4Typ|CD;f7Gu=-^loEr$SiuY+~Cqt;conNjn0Dy{jZifIP3GC(_wOIBbGG}r*9U5@X 
zsV5=1{K2b+#wOc0r+fBT*+*=OBahzIn3Czz0BT|7xr&jX5z&r#0f>%`4Hu2ffa9&D zq{k0q$b-a(;(&;oOaz=M^BWHR7I0+^YSj) zl->Hq6v{MNXi#v!qBVfac;CZ!xr{mX?+spKbw8z4~3T z#Z%*wNvp}v?o$qn=(*zfcrh`ti$*EVx4QqGjsAB={QqTE-a(?+x3LO%XsPWA{L_F; NLq$iq^qKX${{dl*iT?lq literal 0 HcmV?d00001 diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx index d72cf3c2b..287716222 100644 --- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx +++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx @@ -11,6 +11,7 @@ import GeminiLogo from "@/media/llmprovider/gemini.png"; import OllamaLogo from "@/media/llmprovider/ollama.png"; import LMStudioLogo from "@/media/llmprovider/lmstudio.png"; import LocalAiLogo from "@/media/llmprovider/localai.png"; +import TogetherAILogo from "@/media/llmprovider/togetherai.png"; import PreLoader from "@/components/Preloader"; import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions"; import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions"; @@ -22,6 +23,7 @@ import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions"; import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions"; import LLMItem from "@/components/LLMSelection/LLMItem"; import { MagnifyingGlass } from "@phosphor-icons/react"; +import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions"; export default function GeneralLLMPreference() { const [saving, setSaving] = useState(false); @@ -127,6 +129,13 @@ export default function GeneralLLMPreference() { options: , description: "Run LLMs locally on your own machine.", }, + { + name: "Together AI", + value: "togetherai", + logo: TogetherAILogo, + options: , + description: "Run open source models from Together AI.", + }, { name: "Native", value: "native", diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx index db285f128..d9fea4c62 100644 --- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx +++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx @@ -6,6 +6,7 @@ import AzureOpenAiLogo from "@/media/llmprovider/azure.png"; import AnthropicLogo from "@/media/llmprovider/anthropic.png"; import GeminiLogo from "@/media/llmprovider/gemini.png"; import OllamaLogo from "@/media/llmprovider/ollama.png"; +import TogetherAILogo from "@/media/llmprovider/togetherai.png"; import LMStudioLogo from "@/media/llmprovider/lmstudio.png"; import LocalAiLogo from "@/media/llmprovider/localai.png"; import ChromaLogo from "@/media/vectordbs/chroma.png"; @@ -25,7 +26,7 @@ const LLM_SELECTION_PRIVACY = { name: "OpenAI", description: [ "Your chats will not be used for training", - "Your prompts and document text used in responses are visible to OpenAI", + "Your prompts and document text used in response creation are visible to OpenAI", ], logo: OpenAiLogo, }, @@ -41,7 +42,7 @@ const LLM_SELECTION_PRIVACY = { name: "Anthropic", description: [ "Your chats will not be used for training", - "Your prompts and document text used in responses are visible to Anthropic", + "Your prompts and document text used in response creation are visible to Anthropic", ], logo: AnthropicLogo, }, @@ -49,7 +50,7 @@ const LLM_SELECTION_PRIVACY = { name: "Google Gemini", description: [ "Your chats are de-identified and used in training", - "Your prompts and document text are visible in responses to Google", + "Your prompts and document text used in response creation are 
visible to Google", ], logo: GeminiLogo, }, @@ -81,6 +82,14 @@ const LLM_SELECTION_PRIVACY = { ], logo: AnythingLLMIcon, }, + togetherai: { + name: "TogetherAI", + description: [ + "Your chats will not be used for training", + "Your prompts and document text used in response creation are visible to TogetherAI", + ], + logo: TogetherAILogo, + }, }; const VECTOR_DB_PRIVACY = { diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx index 887681985..dc060594e 100644 --- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx +++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx @@ -7,6 +7,7 @@ import GeminiLogo from "@/media/llmprovider/gemini.png"; import OllamaLogo from "@/media/llmprovider/ollama.png"; import LMStudioLogo from "@/media/llmprovider/lmstudio.png"; import LocalAiLogo from "@/media/llmprovider/localai.png"; +import TogetherAILogo from "@/media/llmprovider/togetherai.png"; import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png"; import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions"; import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions"; @@ -21,6 +22,7 @@ import System from "@/models/system"; import paths from "@/utils/paths"; import showToast from "@/utils/toast"; import { useNavigate } from "react-router-dom"; +import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions"; const TITLE = "LLM Preference"; const DESCRIPTION = @@ -100,6 +102,13 @@ export default function LLMPreference({ options: , description: "Run LLMs locally on your own machine.", }, + { + name: "Together AI", + value: "togetherai", + logo: TogetherAILogo, + options: , + description: "Run open source models from Together AI.", + }, { name: "Native", value: "native", diff --git a/server/.env.example b/server/.env.example index 5b159a03d..e41ab63d0 100644 --- a/server/.env.example +++ b/server/.env.example @@ -37,6 +37,10 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea # OLLAMA_MODEL_PREF='llama2' # OLLAMA_MODEL_TOKEN_LIMIT=4096 +# LLM_PROVIDER='togetherai' +# TOGETHER_AI_API_KEY='my-together-ai-key' +# TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1' + ########################################### ######## Embedding API SElECTION ########## ########################################### diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js index a66f93e19..29c2238ff 100644 --- a/server/models/systemSettings.js +++ b/server/models/systemSettings.js @@ -133,6 +133,18 @@ const SystemSettings = { OllamaLLMModelPref: process.env.OLLAMA_MODEL_PREF, OllamaLLMTokenLimit: process.env.OLLAMA_MODEL_TOKEN_LIMIT, + // For embedding credentials when ollama is selected. + OpenAiKey: !!process.env.OPEN_AI_KEY, + AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT, + AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY, + AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF, + } + : {}), + ...(llmProvider === "togetherai" + ? { + TogetherAiApiKey: !!process.env.TOGETHER_AI_API_KEY, + TogetherAiModelPref: process.env.TOGETHER_AI_MODEL_PREF, + // For embedding credentials when ollama is selected. OpenAiKey: !!process.env.OPEN_AI_KEY, AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT, @@ -143,6 +155,12 @@ const SystemSettings = { ...(llmProvider === "native" ? { NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF, + + // For embedding credentials when ollama is selected. 
+ OpenAiKey: !!process.env.OPEN_AI_KEY, + AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT, + AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY, + AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF, } : {}), }; diff --git a/server/utils/AiProviders/togetherAi/index.js b/server/utils/AiProviders/togetherAi/index.js new file mode 100644 index 000000000..df64c413e --- /dev/null +++ b/server/utils/AiProviders/togetherAi/index.js @@ -0,0 +1,198 @@ +const { chatPrompt } = require("../../chats"); + +function togetherAiModels() { + const { MODELS } = require("./models.js"); + return MODELS || {}; +} + +class TogetherAiLLM { + constructor(embedder = null) { + const { Configuration, OpenAIApi } = require("openai"); + if (!process.env.TOGETHER_AI_API_KEY) + throw new Error("No TogetherAI API key was set."); + + const config = new Configuration({ + basePath: "https://api.together.xyz/v1", + apiKey: process.env.TOGETHER_AI_API_KEY, + }); + this.openai = new OpenAIApi(config); + this.model = process.env.TOGETHER_AI_MODEL_PREF; + this.limits = { + history: this.promptWindowLimit() * 0.15, + system: this.promptWindowLimit() * 0.15, + user: this.promptWindowLimit() * 0.7, + }; + + if (!embedder) + throw new Error( + "INVALID TOGETHER AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Together AI as your LLM." + ); + this.embedder = embedder; + } + + #appendContext(contextTexts = []) { + if (!contextTexts || !contextTexts.length) return ""; + return ( + "\nContext:\n" + + contextTexts + .map((text, i) => { + return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`; + }) + .join("") + ); + } + + allModelInformation() { + return togetherAiModels(); + } + + streamingEnabled() { + return "streamChat" in this && "streamGetChatCompletion" in this; + } + + // Ensure the user set a value for the token limit + // and if undefined - assume 4096 window. + promptWindowLimit() { + const availableModels = this.allModelInformation(); + return availableModels[this.model]?.maxLength || 4096; + } + + async isValidChatCompletionModel(model = "") { + const availableModels = this.allModelInformation(); + return availableModels.hasOwnProperty(model); + } + + constructPrompt({ + systemPrompt = "", + contextTexts = [], + chatHistory = [], + userPrompt = "", + }) { + const prompt = { + role: "system", + content: `${systemPrompt}${this.#appendContext(contextTexts)}`, + }; + return [prompt, ...chatHistory, { role: "user", content: userPrompt }]; + } + + async isSafe(_input = "") { + // Not implemented so must be stubbed + return { safe: true, reasons: [] }; + } + + async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) { + if (!(await this.isValidChatCompletionModel(this.model))) + throw new Error( + `Together AI chat: ${this.model} is not valid for chat completion!` + ); + + const textResponse = await this.openai + .createChatCompletion({ + model: this.model, + temperature: Number(workspace?.openAiTemp ?? 
0.7), + n: 1, + messages: await this.compressMessages( + { + systemPrompt: chatPrompt(workspace), + userPrompt: prompt, + chatHistory, + }, + rawHistory + ), + }) + .then((json) => { + const res = json.data; + if (!res.hasOwnProperty("choices")) + throw new Error("Together AI chat: No results!"); + if (res.choices.length === 0) + throw new Error("Together AI chat: No results length!"); + return res.choices[0].message.content; + }) + .catch((error) => { + throw new Error( + `TogetherAI::createChatCompletion failed with: ${error.message}` + ); + }); + + return textResponse; + } + + async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) { + if (!(await this.isValidChatCompletionModel(this.model))) + throw new Error( + `TogetherAI chat: ${this.model} is not valid for chat completion!` + ); + + const streamRequest = await this.openai.createChatCompletion( + { + model: this.model, + stream: true, + temperature: Number(workspace?.openAiTemp ?? 0.7), + n: 1, + messages: await this.compressMessages( + { + systemPrompt: chatPrompt(workspace), + userPrompt: prompt, + chatHistory, + }, + rawHistory + ), + }, + { responseType: "stream" } + ); + return { type: "togetherAiStream", stream: streamRequest }; + } + + async getChatCompletion(messages = null, { temperature = 0.7 }) { + if (!(await this.isValidChatCompletionModel(this.model))) + throw new Error( + `TogetherAI chat: ${this.model} is not valid for chat completion!` + ); + + const { data } = await this.openai.createChatCompletion({ + model: this.model, + messages, + temperature, + }); + + if (!data.hasOwnProperty("choices")) return null; + return data.choices[0].message.content; + } + + async streamGetChatCompletion(messages = null, { temperature = 0.7 }) { + if (!(await this.isValidChatCompletionModel(this.model))) + throw new Error( + `TogetherAI chat: ${this.model} is not valid for chat completion!` + ); + + const streamRequest = await this.openai.createChatCompletion( + { + model: this.model, + stream: true, + messages, + temperature, + }, + { responseType: "stream" } + ); + return { type: "togetherAiStream", stream: streamRequest }; + } + + // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations + async embedTextInput(textInput) { + return await this.embedder.embedTextInput(textInput); + } + async embedChunks(textChunks = []) { + return await this.embedder.embedChunks(textChunks); + } + + async compressMessages(promptArgs = {}, rawHistory = []) { + const { messageArrayCompressor } = require("../../helpers/chat"); + const messageArray = this.constructPrompt(promptArgs); + return await messageArrayCompressor(this, messageArray, rawHistory); + } +} + +module.exports = { + TogetherAiLLM, + togetherAiModels, +}; diff --git a/server/utils/AiProviders/togetherAi/models.js b/server/utils/AiProviders/togetherAi/models.js new file mode 100644 index 000000000..ad940bc39 --- /dev/null +++ b/server/utils/AiProviders/togetherAi/models.js @@ -0,0 +1,226 @@ +const MODELS = { + "togethercomputer/alpaca-7b": { + id: "togethercomputer/alpaca-7b", + organization: "Stanford", + name: "Alpaca (7B)", + maxLength: 2048, + }, + "Austism/chronos-hermes-13b": { + id: "Austism/chronos-hermes-13b", + organization: "Austism", + name: "Chronos Hermes (13B)", + maxLength: 2048, + }, + "togethercomputer/CodeLlama-13b-Instruct": { + id: "togethercomputer/CodeLlama-13b-Instruct", + organization: "Meta", + name: "Code Llama Instruct (13B)", + maxLength: 8192, + }, + "togethercomputer/CodeLlama-34b-Instruct": { + id: 
"togethercomputer/CodeLlama-34b-Instruct", + organization: "Meta", + name: "Code Llama Instruct (34B)", + maxLength: 8192, + }, + "togethercomputer/CodeLlama-7b-Instruct": { + id: "togethercomputer/CodeLlama-7b-Instruct", + organization: "Meta", + name: "Code Llama Instruct (7B)", + maxLength: 8192, + }, + "DiscoResearch/DiscoLM-mixtral-8x7b-v2": { + id: "DiscoResearch/DiscoLM-mixtral-8x7b-v2", + organization: "DiscoResearch", + name: "DiscoLM Mixtral 8x7b", + maxLength: 32768, + }, + "togethercomputer/falcon-40b-instruct": { + id: "togethercomputer/falcon-40b-instruct", + organization: "TII UAE", + name: "Falcon Instruct (40B)", + maxLength: 2048, + }, + "togethercomputer/falcon-7b-instruct": { + id: "togethercomputer/falcon-7b-instruct", + organization: "TII UAE", + name: "Falcon Instruct (7B)", + maxLength: 2048, + }, + "togethercomputer/GPT-NeoXT-Chat-Base-20B": { + id: "togethercomputer/GPT-NeoXT-Chat-Base-20B", + organization: "Together", + name: "GPT-NeoXT-Chat-Base (20B)", + maxLength: 2048, + }, + "togethercomputer/llama-2-13b-chat": { + id: "togethercomputer/llama-2-13b-chat", + organization: "Meta", + name: "LLaMA-2 Chat (13B)", + maxLength: 4096, + }, + "togethercomputer/llama-2-70b-chat": { + id: "togethercomputer/llama-2-70b-chat", + organization: "Meta", + name: "LLaMA-2 Chat (70B)", + maxLength: 4096, + }, + "togethercomputer/llama-2-7b-chat": { + id: "togethercomputer/llama-2-7b-chat", + organization: "Meta", + name: "LLaMA-2 Chat (7B)", + maxLength: 4096, + }, + "togethercomputer/Llama-2-7B-32K-Instruct": { + id: "togethercomputer/Llama-2-7B-32K-Instruct", + organization: "Together", + name: "LLaMA-2-7B-32K-Instruct (7B)", + maxLength: 32768, + }, + "mistralai/Mistral-7B-Instruct-v0.1": { + id: "mistralai/Mistral-7B-Instruct-v0.1", + organization: "MistralAI", + name: "Mistral (7B) Instruct v0.1", + maxLength: 4096, + }, + "mistralai/Mistral-7B-Instruct-v0.2": { + id: "mistralai/Mistral-7B-Instruct-v0.2", + organization: "MistralAI", + name: "Mistral (7B) Instruct v0.2", + maxLength: 32768, + }, + "mistralai/Mixtral-8x7B-Instruct-v0.1": { + id: "mistralai/Mixtral-8x7B-Instruct-v0.1", + organization: "MistralAI", + name: "Mixtral-8x7B Instruct", + maxLength: 32768, + }, + "Gryphe/MythoMax-L2-13b": { + id: "Gryphe/MythoMax-L2-13b", + organization: "Gryphe", + name: "MythoMax-L2 (13B)", + maxLength: 4096, + }, + "NousResearch/Nous-Hermes-llama-2-7b": { + id: "NousResearch/Nous-Hermes-llama-2-7b", + organization: "NousResearch", + name: "Nous Hermes LLaMA-2 (7B)", + maxLength: 4096, + }, + "NousResearch/Nous-Hermes-Llama2-13b": { + id: "NousResearch/Nous-Hermes-Llama2-13b", + organization: "NousResearch", + name: "Nous Hermes Llama-2 (13B)", + maxLength: 4096, + }, + "NousResearch/Nous-Hermes-Llama2-70b": { + id: "NousResearch/Nous-Hermes-Llama2-70b", + organization: "NousResearch", + name: "Nous Hermes Llama-2 (70B)", + maxLength: 4096, + }, + "NousResearch/Nous-Hermes-2-Yi-34B": { + id: "NousResearch/Nous-Hermes-2-Yi-34B", + organization: "NousResearch", + name: "Nous Hermes-2 Yi (34B)", + maxLength: 4096, + }, + "NousResearch/Nous-Capybara-7B-V1p9": { + id: "NousResearch/Nous-Capybara-7B-V1p9", + organization: "NousResearch", + name: "Nous Capybara v1.9 (7B)", + maxLength: 8192, + }, + "openchat/openchat-3.5-1210": { + id: "openchat/openchat-3.5-1210", + organization: "OpenChat", + name: "OpenChat 3.5 1210 (7B)", + maxLength: 8192, + }, + "teknium/OpenHermes-2-Mistral-7B": { + id: "teknium/OpenHermes-2-Mistral-7B", + organization: "teknium", + name: "OpenHermes-2-Mistral 
(7B)", + maxLength: 4096, + }, + "teknium/OpenHermes-2p5-Mistral-7B": { + id: "teknium/OpenHermes-2p5-Mistral-7B", + organization: "teknium", + name: "OpenHermes-2.5-Mistral (7B)", + maxLength: 4096, + }, + "Open-Orca/Mistral-7B-OpenOrca": { + id: "Open-Orca/Mistral-7B-OpenOrca", + organization: "OpenOrca", + name: "OpenOrca Mistral (7B) 8K", + maxLength: 8192, + }, + "garage-bAInd/Platypus2-70B-instruct": { + id: "garage-bAInd/Platypus2-70B-instruct", + organization: "garage-bAInd", + name: "Platypus2 Instruct (70B)", + maxLength: 4096, + }, + "togethercomputer/Pythia-Chat-Base-7B-v0.16": { + id: "togethercomputer/Pythia-Chat-Base-7B-v0.16", + organization: "Together", + name: "Pythia-Chat-Base (7B)", + maxLength: 2048, + }, + "togethercomputer/Qwen-7B-Chat": { + id: "togethercomputer/Qwen-7B-Chat", + organization: "Qwen", + name: "Qwen-Chat (7B)", + maxLength: 8192, + }, + "togethercomputer/RedPajama-INCITE-Chat-3B-v1": { + id: "togethercomputer/RedPajama-INCITE-Chat-3B-v1", + organization: "Together", + name: "RedPajama-INCITE Chat (3B)", + maxLength: 2048, + }, + "togethercomputer/RedPajama-INCITE-7B-Chat": { + id: "togethercomputer/RedPajama-INCITE-7B-Chat", + organization: "Together", + name: "RedPajama-INCITE Chat (7B)", + maxLength: 2048, + }, + "upstage/SOLAR-0-70b-16bit": { + id: "upstage/SOLAR-0-70b-16bit", + organization: "Upstage", + name: "SOLAR v0 (70B)", + maxLength: 4096, + }, + "togethercomputer/StripedHyena-Nous-7B": { + id: "togethercomputer/StripedHyena-Nous-7B", + organization: "Together", + name: "StripedHyena Nous (7B)", + maxLength: 32768, + }, + "lmsys/vicuna-7b-v1.5": { + id: "lmsys/vicuna-7b-v1.5", + organization: "LM Sys", + name: "Vicuna v1.5 (7B)", + maxLength: 4096, + }, + "lmsys/vicuna-13b-v1.5": { + id: "lmsys/vicuna-13b-v1.5", + organization: "LM Sys", + name: "Vicuna v1.5 (13B)", + maxLength: 4096, + }, + "lmsys/vicuna-13b-v1.5-16k": { + id: "lmsys/vicuna-13b-v1.5-16k", + organization: "LM Sys", + name: "Vicuna v1.5 16K (13B)", + maxLength: 16384, + }, + "zero-one-ai/Yi-34B-Chat": { + id: "zero-one-ai/Yi-34B-Chat", + organization: "01.AI", + name: "01-ai Yi Chat (34B)", + maxLength: 4096, + }, +}; + +module.exports.MODELS = MODELS; diff --git a/server/utils/AiProviders/togetherAi/scripts/.gitignore b/server/utils/AiProviders/togetherAi/scripts/.gitignore new file mode 100644 index 000000000..94a2dd146 --- /dev/null +++ b/server/utils/AiProviders/togetherAi/scripts/.gitignore @@ -0,0 +1 @@ +*.json \ No newline at end of file diff --git a/server/utils/AiProviders/togetherAi/scripts/chat_models.txt b/server/utils/AiProviders/togetherAi/scripts/chat_models.txt new file mode 100644 index 000000000..81c23bf4a --- /dev/null +++ b/server/utils/AiProviders/togetherAi/scripts/chat_models.txt @@ -0,0 +1,39 @@ +| Organization | Model Name | Model String for API | Max Seq Length | +| ------------- | ---------------------------- | -------------------------------------------- | -------------- | +| Stanford | Alpaca (7B) | togethercomputer/alpaca-7b | 2048 | +| Austism | Chronos Hermes (13B) | Austism/chronos-hermes-13b | 2048 | +| Meta | Code Llama Instruct (13B) | togethercomputer/CodeLlama-13b-Instruct | 8192 | +| Meta | Code Llama Instruct (34B) | togethercomputer/CodeLlama-34b-Instruct | 8192 | +| Meta | Code Llama Instruct (7B) | togethercomputer/CodeLlama-7b-Instruct | 8192 | +| DiscoResearch | DiscoLM Mixtral 8x7b | DiscoResearch/DiscoLM-mixtral-8x7b-v2 | 32768 | +| TII UAE | Falcon Instruct (40B) | togethercomputer/falcon-40b-instruct | 2048 | +| TII UAE | 
Falcon Instruct (7B) | togethercomputer/falcon-7b-instruct | 2048 | +| Together | GPT-NeoXT-Chat-Base (20B) | togethercomputer/GPT-NeoXT-Chat-Base-20B | 2048 | +| Meta | LLaMA-2 Chat (13B) | togethercomputer/llama-2-13b-chat | 4096 | +| Meta | LLaMA-2 Chat (70B) | togethercomputer/llama-2-70b-chat | 4096 | +| Meta | LLaMA-2 Chat (7B) | togethercomputer/llama-2-7b-chat | 4096 | +| Together | LLaMA-2-7B-32K-Instruct (7B) | togethercomputer/Llama-2-7B-32K-Instruct | 32768 | +| MistralAI | Mistral (7B) Instruct v0.1 | mistralai/Mistral-7B-Instruct-v0.1 | 4096 | +| MistralAI | Mistral (7B) Instruct v0.2 | mistralai/Mistral-7B-Instruct-v0.2 | 32768 | +| MistralAI | Mixtral-8x7B Instruct | mistralai/Mixtral-8x7B-Instruct-v0.1 | 32768 | +| Gryphe | MythoMax-L2 (13B) | Gryphe/MythoMax-L2-13b | 4096 | +| NousResearch | Nous Hermes LLaMA-2 (7B) | NousResearch/Nous-Hermes-llama-2-7b | 4096 | +| NousResearch | Nous Hermes Llama-2 (13B) | NousResearch/Nous-Hermes-Llama2-13b | 4096 | +| NousResearch | Nous Hermes Llama-2 (70B) | NousResearch/Nous-Hermes-Llama2-70b | 4096 | +| NousResearch | Nous Hermes-2 Yi (34B) | NousResearch/Nous-Hermes-2-Yi-34B | 4096 | +| NousResearch | Nous Capybara v1.9 (7B) | NousResearch/Nous-Capybara-7B-V1p9 | 8192 | +| OpenChat | OpenChat 3.5 1210 (7B) | openchat/openchat-3.5-1210 | 8192 | +| teknium | OpenHermes-2-Mistral (7B) | teknium/OpenHermes-2-Mistral-7B | 4096 | +| teknium | OpenHermes-2.5-Mistral (7B) | teknium/OpenHermes-2p5-Mistral-7B | 4096 | +| OpenOrca | OpenOrca Mistral (7B) 8K | Open-Orca/Mistral-7B-OpenOrca | 8192 | +| garage-bAInd | Platypus2 Instruct (70B) | garage-bAInd/Platypus2-70B-instruct | 4096 | +| Together | Pythia-Chat-Base (7B) | togethercomputer/Pythia-Chat-Base-7B-v0.16 | 2048 | +| Qwen | Qwen-Chat (7B) | togethercomputer/Qwen-7B-Chat | 8192 | +| Together | RedPajama-INCITE Chat (3B) | togethercomputer/RedPajama-INCITE-Chat-3B-v1 | 2048 | +| Together | RedPajama-INCITE Chat (7B) | togethercomputer/RedPajama-INCITE-7B-Chat | 2048 | +| Upstage | SOLAR v0 (70B) | upstage/SOLAR-0-70b-16bit | 4096 | +| Together | StripedHyena Nous (7B) | togethercomputer/StripedHyena-Nous-7B | 32768 | +| LM Sys | Vicuna v1.5 (7B) | lmsys/vicuna-7b-v1.5 | 4096 | +| LM Sys | Vicuna v1.5 (13B) | lmsys/vicuna-13b-v1.5 | 4096 | +| LM Sys | Vicuna v1.5 16K (13B) | lmsys/vicuna-13b-v1.5-16k | 16384 | +| 01.AI | 01-ai Yi Chat (34B) | zero-one-ai/Yi-34B-Chat | 4096 | \ No newline at end of file diff --git a/server/utils/AiProviders/togetherAi/scripts/parse.mjs b/server/utils/AiProviders/togetherAi/scripts/parse.mjs new file mode 100644 index 000000000..b96d40ab1 --- /dev/null +++ b/server/utils/AiProviders/togetherAi/scripts/parse.mjs @@ -0,0 +1,41 @@ +// Together AI does not provide a simple REST API to get models, +// so we have a table which we copy from their documentation +// https://docs.together.ai/edit/inference-models that we can +// then parse and get all models from in a format that makes sense +// Why this does not exist is so bizarre, but whatever. + +// To run, cd into this directory and run `node parse.mjs` +// copy outputs into the export in ../models.js + +// Update the date below if you run this again because TogetherAI added new models. 
+// Last Collected: Jan 10, 2023 + +import fs from "fs"; + +function parseChatModels() { + const fixed = {}; + const tableString = fs.readFileSync("chat_models.txt", { encoding: "utf-8" }); + const rows = tableString.split("\n").slice(2); + + rows.forEach((row) => { + const [provider, name, id, maxLength] = row.split("|").slice(1, -1); + const data = { + provider: provider.trim(), + name: name.trim(), + id: id.trim(), + maxLength: Number(maxLength.trim()), + }; + + fixed[data.id] = { + id: data.id, + organization: data.provider, + name: data.name, + maxLength: data.maxLength, + }; + }); + + fs.writeFileSync("chat_models.json", JSON.stringify(fixed, null, 2), "utf-8"); + return fixed; +} + +parseChatModels(); diff --git a/server/utils/chats/stream.js b/server/utils/chats/stream.js index a6ade1819..84058c8de 100644 --- a/server/utils/chats/stream.js +++ b/server/utils/chats/stream.js @@ -262,6 +262,96 @@ function handleStreamResponses(response, stream, responseProps) { }); } + if ((stream.type = "togetherAiStream")) { + return new Promise((resolve) => { + let fullText = ""; + let chunk = ""; + stream.stream.data.on("data", (data) => { + const lines = data + ?.toString() + ?.split("\n") + .filter((line) => line.trim() !== ""); + + for (const line of lines) { + let validJSON = false; + const message = chunk + line.replace(/^data: /, ""); + + if (message !== "[DONE]") { + // JSON chunk is incomplete and has not ended yet + // so we need to stitch it together. You would think JSON + // chunks would only come complete - but they don't! + try { + JSON.parse(message); + validJSON = true; + } catch {} + + if (!validJSON) { + // It can be possible that the chunk decoding is running away + // and the message chunk fails to append due to string length. + // In this case abort the chunk and reset so we can continue. + // ref: https://github.com/Mintplex-Labs/anything-llm/issues/416 + try { + chunk += message; + } catch (e) { + console.error(`Chunk appending error`, e); + chunk = ""; + } + continue; + } else { + chunk = ""; + } + } + + if (message == "[DONE]") { + writeResponseChunk(response, { + uuid, + sources, + type: "textResponseChunk", + textResponse: "", + close: true, + error: false, + }); + resolve(fullText); + } else { + let finishReason = null; + let token = ""; + try { + const json = JSON.parse(message); + token = json?.choices?.[0]?.delta?.content; + finishReason = json?.choices?.[0]?.finish_reason || null; + } catch { + continue; + } + + if (token) { + fullText += token; + writeResponseChunk(response, { + uuid, + sources: [], + type: "textResponseChunk", + textResponse: token, + close: false, + error: false, + }); + } + + if (finishReason !== null) { + writeResponseChunk(response, { + uuid, + sources, + type: "textResponseChunk", + textResponse: "", + close: true, + error: false, + }); + resolve(fullText); + } + } + } + }); + }); + } + // If stream is not a regular OpenAI Stream (like if using native model, Ollama, or most LangChain interfaces) // we can just iterate the stream content instead. 
if (!stream.hasOwnProperty("data")) { diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js index 5bd7b299e..54976895e 100644 --- a/server/utils/helpers/customModels.js +++ b/server/utils/helpers/customModels.js @@ -1,4 +1,11 @@ -const SUPPORT_CUSTOM_MODELS = ["openai", "localai", "ollama", "native-llm"]; +const { togetherAiModels } = require("../AiProviders/togetherAi"); +const SUPPORT_CUSTOM_MODELS = [ + "openai", + "localai", + "ollama", + "native-llm", + "togetherai", +]; async function getCustomModels(provider = "", apiKey = null, basePath = null) { if (!SUPPORT_CUSTOM_MODELS.includes(provider)) @@ -11,6 +18,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) { return await localAIModels(basePath, apiKey); case "ollama": return await ollamaAIModels(basePath, apiKey); + case "togetherai": + return await getTogetherAiModels(); case "native-llm": return nativeLLMModels(); default: @@ -92,6 +101,21 @@ async function ollamaAIModels(basePath = null, _apiKey = null) { return { models, error: null }; } +async function getTogetherAiModels() { + const knownModels = togetherAiModels(); + if (!Object.keys(knownModels).length === 0) + return { models: [], error: null }; + + const models = Object.values(knownModels).map((model) => { + return { + id: model.id, + organization: model.organization, + name: model.name, + }; + }); + return { models, error: null }; +} + function nativeLLMModels() { const fs = require("fs"); const path = require("path"); diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js index bde5e8a0a..ac7029362 100644 --- a/server/utils/helpers/index.js +++ b/server/utils/helpers/index.js @@ -46,6 +46,9 @@ function getLLMProvider() { case "ollama": const { OllamaAILLM } = require("../AiProviders/ollama"); return new OllamaAILLM(embedder); + case "togetherai": + const { TogetherAiLLM } = require("../AiProviders/togetherAi"); + return new TogetherAiLLM(embedder); case "native": const { NativeLLM } = require("../AiProviders/native"); return new NativeLLM(embedder); diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js index 0f891f1ba..e6e97df5f 100644 --- a/server/utils/helpers/updateENV.js +++ b/server/utils/helpers/updateENV.js @@ -170,6 +170,16 @@ const KEY_MAPPING = { checks: [], }, + // Together Ai Options + TogetherAiApiKey: { + envKey: "TOGETHER_AI_API_KEY", + checks: [isNotEmpty], + }, + TogetherAiModelPref: { + envKey: "TOGETHER_AI_MODEL_PREF", + checks: [isNotEmpty], + }, + // System Settings AuthToken: { envKey: "AUTH_TOKEN", @@ -233,7 +243,7 @@ function validOllamaLLMBasePath(input = "") { } function supportedLLM(input = "") { - return [ + const validSelection = [ "openai", "azure", "anthropic", @@ -242,7 +252,9 @@ function supportedLLM(input = "") { "localai", "ollama", "native", + "togetherai", ].includes(input); + return validSelection ? 
null : `${input} is not a valid LLM provider.`; } function validGeminiModel(input = "") { From 4e2c0f04b4cce4d7618145658292dcd35862c038 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Wed, 10 Jan 2024 13:18:48 -0800 Subject: [PATCH 12/41] Dynamic vector count on workspace settings (#567) * Dynamic vector count on workspace settings Add count to be workspace specific, fallback to system count Update layout of data in settings Update OpenAI per-token embedding price * linting --- .../Modals/MangeWorkspace/Documents/index.jsx | 4 +- .../Modals/MangeWorkspace/Settings/index.jsx | 76 +++++++++++-------- .../Modals/MangeWorkspace/index.jsx | 5 +- frontend/src/models/system.js | 6 +- server/endpoints/invite.js | 4 +- server/endpoints/system.js | 26 ++++--- 6 files changed, 76 insertions(+), 45 deletions(-) diff --git a/frontend/src/components/Modals/MangeWorkspace/Documents/index.jsx b/frontend/src/components/Modals/MangeWorkspace/Documents/index.jsx index 52b818ffa..ff7f0dd65 100644 --- a/frontend/src/components/Modals/MangeWorkspace/Documents/index.jsx +++ b/frontend/src/components/Modals/MangeWorkspace/Documents/index.jsx @@ -6,7 +6,9 @@ import Directory from "./Directory"; import showToast from "../../../../utils/toast"; import WorkspaceDirectory from "./WorkspaceDirectory"; -const COST_PER_TOKEN = 0.0004; +// OpenAI Cost per token for text-ada-embedding +// ref: https://openai.com/pricing#:~:text=%C2%A0/%201K%20tokens-,Embedding%20models,-Build%20advanced%20search +const COST_PER_TOKEN = 0.0000001; // $0.0001 / 1K tokens export default function DocumentSettings({ workspace, diff --git a/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx b/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx index 29d2bfa73..2fce91e1f 100644 --- a/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx +++ b/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx @@ -26,24 +26,11 @@ function castToType(key, value) { return definitions[key].cast(value); } -export default function WorkspaceSettings({ workspace }) { +export default function WorkspaceSettings({ active, workspace }) { const { slug } = useParams(); const formEl = useRef(null); const [saving, setSaving] = useState(false); const [hasChanges, setHasChanges] = useState(false); - const [totalVectors, setTotalVectors] = useState(null); - const [canDelete, setCanDelete] = useState(false); - - useEffect(() => { - async function fetchKeys() { - const canDelete = await System.getCanDeleteWorkspaces(); - setCanDelete(canDelete); - - const totalVectors = await System.totalIndexes(); - setTotalVectors(totalVectors); - } - fetchKeys(); - }, []); const handleUpdate = async (e) => { setSaving(true); @@ -89,6 +76,9 @@ export default function WorkspaceSettings({ workspace }) {

Vector database identifier

+

+ {" "} +

{workspace?.slug}

@@ -101,13 +91,7 @@ export default function WorkspaceSettings({ workspace }) {

Total number of vectors in your vector database.

- {totalVectors !== null ? ( -

- {totalVectors} -

- ) : ( - - )} + @@ -275,15 +259,7 @@ export default function WorkspaceSettings({ workspace }) {
- {canDelete && ( - - )} + {hasChanges && ( + ); +} + +function VectorCount({ reload, workspace }) { + const [totalVectors, setTotalVectors] = useState(null); + useEffect(() => { + async function fetchVectorCount() { + const totalVectors = await System.totalIndexes(workspace.slug); + setTotalVectors(totalVectors); + } + fetchVectorCount(); + }, [workspace?.slug, reload]); + + if (totalVectors === null) return ; + return ( +

+ {totalVectors} +

+ ); +} diff --git a/frontend/src/components/Modals/MangeWorkspace/index.jsx b/frontend/src/components/Modals/MangeWorkspace/index.jsx index 946e96d26..9092d0d51 100644 --- a/frontend/src/components/Modals/MangeWorkspace/index.jsx +++ b/frontend/src/components/Modals/MangeWorkspace/index.jsx @@ -114,7 +114,10 @@ const ManageWorkspace = ({ hideModal = noop, providedSlug = null }) => { />
- +
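For reference, a minimal sketch of how the slug-aware vector count introduced by this patch can be fetched from the frontend model layer — assuming the default dev API base of http://localhost:3001/api and a hypothetical workspace slug "hello-world"; the real client also attaches baseHeaders() for auth, which is omitted here for brevity:

// Sketch only (assumed base URL and slug; auth headers omitted).
const API_BASE = "http://localhost:3001/api";

async function totalIndexes(slug = null) {
  // With a slug, the endpoint returns the per-workspace (namespace) count;
  // without one, it falls back to the system-wide vector total.
  const url = new URL(`${API_BASE}/system/system-vectors`);
  if (!!slug) url.searchParams.append("slug", encodeURIComponent(slug));
  const res = await fetch(url.toString());
  const { vectorCount } = await res.json();
  return vectorCount;
}

// await totalIndexes();              // all namespaces combined
// await totalIndexes("hello-world"); // a single workspace's namespace

The server-side handler (further below) mirrors this contract: when a slug query param is present it calls VectorDb.namespaceCount(slug), otherwise VectorDb.totalVectors().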
diff --git a/frontend/src/models/system.js b/frontend/src/models/system.js index 9b71a6055..c64ac66a1 100644 --- a/frontend/src/models/system.js +++ b/frontend/src/models/system.js @@ -9,8 +9,10 @@ const System = { .then((res) => res?.online || false) .catch(() => false); }, - totalIndexes: async function () { - return await fetch(`${API_BASE}/system/system-vectors`, { + totalIndexes: async function (slug = null) { + const url = new URL(`${API_BASE}/system/system-vectors`); + if (!!slug) url.searchParams.append("slug", encodeURIComponent(slug)); + return await fetch(url.toString(), { headers: baseHeaders(), }) .then((res) => { diff --git a/server/endpoints/invite.js b/server/endpoints/invite.js index c5c344510..4fd8d1545 100644 --- a/server/endpoints/invite.js +++ b/server/endpoints/invite.js @@ -42,11 +42,11 @@ function inviteEndpoints(app) { return; } - const { user, error } = await User.create(({ + const { user, error } = await User.create({ username, password, role: "default", - })); + }); if (!user) { console.error("Accepting invite:", error); response diff --git a/server/endpoints/system.js b/server/endpoints/system.js index 982d5ecaa..a6acf47e6 100644 --- a/server/endpoints/system.js +++ b/server/endpoints/system.js @@ -15,6 +15,7 @@ const { makeJWT, userFromSession, multiUserMode, + queryParams, } = require("../utils/http"); const { setupDataImports, @@ -180,16 +181,23 @@ function systemEndpoints(app) { } }); - app.get("/system/system-vectors", [validatedRequest], async (_, response) => { - try { - const VectorDb = getVectorDbClass(); - const vectorCount = await VectorDb.totalVectors(); - response.status(200).json({ vectorCount }); - } catch (e) { - console.log(e.message, e); - response.sendStatus(500).end(); + app.get( + "/system/system-vectors", + [validatedRequest], + async (request, response) => { + try { + const query = queryParams(request); + const VectorDb = getVectorDbClass(); + const vectorCount = !!query.slug + ? 
await VectorDb.namespaceCount(query.slug) + : await VectorDb.totalVectors(); + response.status(200).json({ vectorCount }); + } catch (e) { + console.log(e.message, e); + response.sendStatus(500).end(); + } } - }); + ); app.delete( "/system/remove-document", From dfd03e332cd52987f9ab464c33cd1a39bc98b7dd Mon Sep 17 00:00:00 2001 From: timothycarambat Date: Wed, 10 Jan 2024 15:32:07 -0800 Subject: [PATCH 13/41] patch stream response --- server/utils/chats/stream.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/utils/chats/stream.js b/server/utils/chats/stream.js index 84058c8de..73437eec5 100644 --- a/server/utils/chats/stream.js +++ b/server/utils/chats/stream.js @@ -262,7 +262,7 @@ function handleStreamResponses(response, stream, responseProps) { }); } - if ((stream.type = "togetherAiStream")) { + if (stream.type === "togetherAiStream") { return new Promise((resolve) => { let fullText = ""; let chunk = ""; From 4af9b9d5ccd0c71aefa0d3c8c94579036593b2eb Mon Sep 17 00:00:00 2001 From: timothycarambat Date: Wed, 10 Jan 2024 19:27:39 -0800 Subject: [PATCH 14/41] fix: relative/absolute url patch for vector count connect #516 --- frontend/src/models/system.js | 4 ++-- frontend/src/utils/constants.js | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/frontend/src/models/system.js b/frontend/src/models/system.js index c64ac66a1..f248d4f9a 100644 --- a/frontend/src/models/system.js +++ b/frontend/src/models/system.js @@ -1,4 +1,4 @@ -import { API_BASE, AUTH_TIMESTAMP } from "@/utils/constants"; +import { API_BASE, AUTH_TIMESTAMP, fullApiUrl } from "@/utils/constants"; import { baseHeaders } from "@/utils/request"; import DataConnector from "./dataConnector"; @@ -10,7 +10,7 @@ const System = { .catch(() => false); }, totalIndexes: async function (slug = null) { - const url = new URL(`${API_BASE}/system/system-vectors`); + const url = new URL(`${fullApiUrl()}/system/system-vectors`); if (!!slug) url.searchParams.append("slug", encodeURIComponent(slug)); return await fetch(url.toString(), { headers: baseHeaders(), diff --git a/frontend/src/utils/constants.js b/frontend/src/utils/constants.js index 11b8da976..2fde1ee00 100644 --- a/frontend/src/utils/constants.js +++ b/frontend/src/utils/constants.js @@ -7,3 +7,8 @@ export const COMPLETE_QUESTIONNAIRE = "anythingllm_completed_questionnaire"; export const USER_BACKGROUND_COLOR = "bg-historical-msg-user"; export const AI_BACKGROUND_COLOR = "bg-historical-msg-system"; + +export function fullApiUrl() { + if (API_BASE !== "/api") return API_BASE; + return `${window.location.origin}/api`; +} From a4ace56a401ffc8ce0082d7444159dfd5dc28834 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Thu, 11 Jan 2024 09:57:59 -0800 Subject: [PATCH 15/41] Patch minor XSS opportunity where user can self-XSS themselves. (#574) Patch minor XSS opportunity where user can self-XSS themselvess. 
There is not real vuln here as any instance is not public facing --- frontend/package.json | 1 + .../ChatContainer/ChatHistory/HistoricalMessage/index.jsx | 6 +++++- frontend/yarn.lock | 5 +++++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/frontend/package.json b/frontend/package.json index ff2698953..86e552ab7 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -16,6 +16,7 @@ "@microsoft/fetch-event-source": "^2.0.1", "@phosphor-icons/react": "^2.0.13", "buffer": "^6.0.3", + "dompurify": "^3.0.8", "he": "^1.2.0", "highlight.js": "^11.9.0", "lodash.debounce": "^4.0.8", diff --git a/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/HistoricalMessage/index.jsx b/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/HistoricalMessage/index.jsx index f0605a372..4637b1cd7 100644 --- a/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/HistoricalMessage/index.jsx +++ b/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/HistoricalMessage/index.jsx @@ -6,6 +6,8 @@ import { userFromStorage } from "@/utils/request"; import Citations from "../Citation"; import { AI_BACKGROUND_COLOR, USER_BACKGROUND_COLOR } from "@/utils/constants"; import { v4 } from "uuid"; +import createDOMPurify from "dompurify"; +const DOMPurify = createDOMPurify(window); const HistoricalMessage = forwardRef( ( @@ -45,7 +47,9 @@ const HistoricalMessage = forwardRef( ) : ( )} diff --git a/frontend/yarn.lock b/frontend/yarn.lock index e7b223df9..c9181f15f 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -1021,6 +1021,11 @@ doctrine@^3.0.0: dependencies: esutils "^2.0.2" +dompurify@^3.0.8: + version "3.0.8" + resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-3.0.8.tgz#e0021ab1b09184bc8af7e35c7dd9063f43a8a437" + integrity sha512-b7uwreMYL2eZhrSCRC4ahLTeZcPZxSmYfmcQGXGkXiZSNW1X85v+SDM5KsWcpivIiUBH47Ji7NtyUdpLeF5JZQ== + electron-to-chromium@^1.4.535: version "1.4.576" resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.576.tgz#0c6940fdc0d60f7e34bd742b29d8fa847c9294d1" From 3c859ba3038121b67fb98e87dc52617fa27cbef0 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Thu, 11 Jan 2024 10:54:55 -0800 Subject: [PATCH 16/41] Change pwd check to O(1) check to prevent timing attacks - single user mode (#575) Change pwd check to O(1) check to prevent timing attacks --- frontend/src/components/Modals/Password/index.jsx | 2 +- frontend/src/pages/Login/index.jsx | 6 +++++- server/endpoints/system.js | 10 ++++++++-- server/utils/middleware/validatedRequest.js | 3 ++- 4 files changed, 16 insertions(+), 5 deletions(-) diff --git a/frontend/src/components/Modals/Password/index.jsx b/frontend/src/components/Modals/Password/index.jsx index e98986851..00fefe4ed 100644 --- a/frontend/src/components/Modals/Password/index.jsx +++ b/frontend/src/components/Modals/Password/index.jsx @@ -37,7 +37,7 @@ export default function PasswordModal({ mode = "single" }) { export function usePasswordModal() { const [auth, setAuth] = useState({ loading: true, - required: false, + requiresAuth: false, mode: "single", }); diff --git a/frontend/src/pages/Login/index.jsx b/frontend/src/pages/Login/index.jsx index ec5950cbe..cf8ab2493 100644 --- a/frontend/src/pages/Login/index.jsx +++ b/frontend/src/pages/Login/index.jsx @@ -1,9 +1,13 @@ import React from "react"; import PasswordModal, { usePasswordModal } from "@/components/Modals/Password"; import { FullScreenLoader } from "@/components/Preloader"; +import { Navigate } from 
"react-router-dom"; +import paths from "@/utils/paths"; export default function Login() { - const { loading, mode } = usePasswordModal(); + const { loading, requiresAuth, mode } = usePasswordModal(); if (loading) return ; + if (requiresAuth === false) return ; + return ; } diff --git a/server/endpoints/system.js b/server/endpoints/system.js index a6acf47e6..d2a13d10f 100644 --- a/server/endpoints/system.js +++ b/server/endpoints/system.js @@ -107,6 +107,8 @@ function systemEndpoints(app) { app.post("/request-token", async (request, response) => { try { + const bcrypt = require("bcrypt"); + if (await SystemSettings.isMultiUserMode()) { const { username, password } = reqBody(request); const existingUser = await User.get({ username }); @@ -121,7 +123,6 @@ function systemEndpoints(app) { return; } - const bcrypt = require("bcrypt"); if (!bcrypt.compareSync(password, existingUser.password)) { response.status(200).json({ user: null, @@ -159,7 +160,12 @@ function systemEndpoints(app) { return; } else { const { password } = reqBody(request); - if (password !== process.env.AUTH_TOKEN) { + if ( + !bcrypt.compareSync( + password, + bcrypt.hashSync(process.env.AUTH_TOKEN, 10) + ) + ) { response.status(401).json({ valid: false, token: null, diff --git a/server/utils/middleware/validatedRequest.js b/server/utils/middleware/validatedRequest.js index 275522bb9..6f3df26da 100644 --- a/server/utils/middleware/validatedRequest.js +++ b/server/utils/middleware/validatedRequest.js @@ -36,8 +36,9 @@ async function validatedRequest(request, response, next) { return; } + const bcrypt = require("bcrypt"); const { p } = decodeJWT(token); - if (p !== process.env.AUTH_TOKEN) { + if (!bcrypt.compareSync(p, bcrypt.hashSync(process.env.AUTH_TOKEN, 10))) { response.status(401).json({ error: "Invalid auth token found.", }); From 7200a06ef07d92eef5f3c4c8be29824aa001d688 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Thu, 11 Jan 2024 12:11:45 -0800 Subject: [PATCH 17/41] prevent manager in multi-user from updatingENV via HTTP (#576) * prevent manager in multi-user from updatingENV via HTTP * remove unneeded args --- server/endpoints/system.js | 6 ++++++ server/utils/http/index.js | 2 ++ 2 files changed, 8 insertions(+) diff --git a/server/endpoints/system.js b/server/endpoints/system.js index d2a13d10f..345bd230a 100644 --- a/server/endpoints/system.js +++ b/server/endpoints/system.js @@ -283,6 +283,12 @@ function systemEndpoints(app) { [validatedRequest, flexUserRoleValid], async (request, response) => { try { + const user = await userFromSession(request, response); + if (!!user && user.role !== "admin") { + response.sendStatus(401).end(); + return; + } + const body = reqBody(request); const { newValues, error } = updateENV(body); if (process.env.NODE_ENV === "production") await dumpENV(); diff --git a/server/utils/http/index.js b/server/utils/http/index.js index cb57c4a28..83e3fa5dd 100644 --- a/server/utils/http/index.js +++ b/server/utils/http/index.js @@ -20,6 +20,8 @@ function makeJWT(info = {}, expiry = "30d") { return JWT.sign(info, process.env.JWT_SECRET, { expiresIn: expiry }); } +// Note: Only valid for finding users in multi-user mode +// as single-user mode with password is not a "user" async function userFromSession(request, response = null) { if (!!response && !!response.locals?.user) { return response.locals.user; From 1563a1b20f72846d617a88510970d0426ab880d3 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Thu, 11 Jan 2024 12:29:00 -0800 Subject: [PATCH 18/41] Strict link protocol validation 
(#577) --- collector/utils/url/index.js | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/collector/utils/url/index.js b/collector/utils/url/index.js index 3a7f90721..419c02c25 100644 --- a/collector/utils/url/index.js +++ b/collector/utils/url/index.js @@ -1,6 +1,9 @@ +const VALID_PROTOCOLS = ["https:", "http:"]; + function validURL(url) { try { - new URL(url); + const destination = new URL(url); + if (!VALID_PROTOCOLS.includes(destination.protocol)) return false; return true; } catch {} return false; From b2b2c2afe15c48952d57b4d01e7108f9515c5f55 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Thu, 11 Jan 2024 18:37:00 -0800 Subject: [PATCH 19/41] protect AWS CF deployments by automatically blocking metadata URL (#578) --- .../aws/cloudformation/cloudformation_create_anythingllm.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud-deployments/aws/cloudformation/cloudformation_create_anythingllm.json b/cloud-deployments/aws/cloudformation/cloudformation_create_anythingllm.json index 313a4ecdb..e81e16b61 100644 --- a/cloud-deployments/aws/cloudformation/cloudformation_create_anythingllm.json +++ b/cloud-deployments/aws/cloudformation/cloudformation_create_anythingllm.json @@ -82,7 +82,8 @@ "\n", "#!/bin/bash\n", "# check output of userdata script with sudo tail -f /var/log/cloud-init-output.log\n", - "sudo yum install docker -y\n", + "sudo yum install docker iptables -y\n", + "sudo iptables -A OUTPUT -m owner ! --uid-owner root -d 169.254.169.254 -j DROP\n", "sudo systemctl enable docker\n", "sudo systemctl start docker\n", "mkdir -p /home/ec2-user/anythingllm\n", From 6faa0efaa8f8674c8b1221d84224729853218039 Mon Sep 17 00:00:00 2001 From: Shuyoou Date: Sat, 13 Jan 2024 05:23:57 +0800 Subject: [PATCH 20/41] Issue #543 support milvus vector db (#579) * issue #543 support milvus vector db * migrate Milvus to use MilvusClient instead of ORM normalize env setup for docs/implementation feat: embedder model dimension added * update comments --------- Co-authored-by: timothycarambat --- README.md | 1 + docker/.env.example | 6 + .../MilvusDBOptions/index.jsx | 52 +++ frontend/src/media/vectordbs/milvus.png | Bin 0 -> 4250 bytes .../GeneralSettings/VectorDatabase/index.jsx | 9 + .../Steps/DataHandling/index.jsx | 8 + .../Steps/VectorDatabaseConnection/index.jsx | 9 + server/.env.example | 5 + server/models/systemSettings.js | 7 + server/package.json | 3 +- .../EmbeddingEngines/azureOpenAi/index.js | 1 + .../utils/EmbeddingEngines/localAi/index.js | 4 + server/utils/EmbeddingEngines/native/index.js | 1 + server/utils/EmbeddingEngines/openAi/index.js | 1 + server/utils/helpers/index.js | 3 + server/utils/helpers/updateENV.js | 23 +- .../vectorDbProviders/milvus/MILVUS_SETUP.md | 40 ++ .../utils/vectorDbProviders/milvus/index.js | 360 ++++++++++++++++++ .../utils/vectorDbProviders/qdrant/index.js | 8 +- server/yarn.lock | 223 ++++++++++- 20 files changed, 759 insertions(+), 5 deletions(-) create mode 100644 frontend/src/components/VectorDBSelection/MilvusDBOptions/index.jsx create mode 100644 frontend/src/media/vectordbs/milvus.png create mode 100644 server/utils/vectorDbProviders/milvus/MILVUS_SETUP.md create mode 100644 server/utils/vectorDbProviders/milvus/index.js diff --git a/README.md b/README.md index 5af9278b4..4249c42bc 100644 --- a/README.md +++ b/README.md @@ -87,6 +87,7 @@ Some cool features of AnythingLLM - [Chroma](https://trychroma.com) - [Weaviate](https://weaviate.io) - [QDrant](https://qdrant.tech) +- [Milvus](https://milvus.io) ### 
Technical Overview diff --git a/docker/.env.example b/docker/.env.example index 2f8b2ff35..5bd909af6 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -89,6 +89,12 @@ GID='1000' # QDRANT_ENDPOINT="http://localhost:6333" # QDRANT_API_KEY= +# Enable all below if you are using vector database: Milvus. +# VECTOR_DB="milvus" +# MILVUS_ADDRESS="http://localhost:19530" +# MILVUS_USERNAME= +# MILVUS_PASSWORD= + # CLOUD DEPLOYMENT VARIRABLES ONLY # AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting. diff --git a/frontend/src/components/VectorDBSelection/MilvusDBOptions/index.jsx b/frontend/src/components/VectorDBSelection/MilvusDBOptions/index.jsx new file mode 100644 index 000000000..07a0ef2f5 --- /dev/null +++ b/frontend/src/components/VectorDBSelection/MilvusDBOptions/index.jsx @@ -0,0 +1,52 @@ +export default function MilvusDBOptions({ settings }) { + return ( +
+
+
+ + +
+ +
+ + +
+
+ + +
+
+
+ ); +} diff --git a/frontend/src/media/vectordbs/milvus.png b/frontend/src/media/vectordbs/milvus.png new file mode 100644 index 0000000000000000000000000000000000000000..e1bcbbd105480ebe9bb6d111823f9e5add9a466e GIT binary patch literal 4250 zcmds5`#;m|8%HRo6rr#biky0+;h~Wt=Cm4?93pc*mo_n!3Q>fSIgHB0WHgLyjCw?p z&0)^RBx6n$Lo~iy&-eRJe1Ev_`*poO_v^av>w3Sh!~IFVY;P$cDlf{%$0q@{GKX^4 z@4T^3m^+&|&1Z5Kk#MVP5qx~ICwPM&3{~ar;E#Y>Uf`>wDb8~RK|eD)Gd@1zQ?cJS zg!uRloCcelIo{!48jq@Q>X&K%A>k*s z8$R|rUz?IQk~fq&a=0S(ChU+#mVl^+fauF(_wV_nN2P!HQPJVjnWzwtqJfyV24W}a zrka3z8_TGLo#hEXNhj*gCB7+WKVOTs{NeB?!orHh@%-)no-MlncOr2`$oqX82+9o2 zFhZdjktCf79mG;7-gw=5+<~|<=|D(f$ZrjdZHD8Qf~=QdR|8jsp{M{*b@aW9s((#6 zXIC{JynNQRv&7#0G`qtIS{jy4j`mY8S1AXB8mJ79+N|g}Jv@8!$+H0N_QN6t)!824 zrUhxd^o}ONGV0@5fOS$to>ddEiLoQG=gdPvVMUdA={o;>hNTI5Yx5Rf=3Tvpd;c&yX)O*UxT)eENFeYOf z?37>7$&tevq;(R`1lh>wOA}WYkm8~$m^RmifbOMdxUk^kW#z1)J8p9Lt2tnTRJ!n= z@3g+56VR}2XIA0GwesP`F!(r#G4~gq%{F!I9S$u)kWm3m152ojS2%UHETyMnR{SF3 zhL;4CTCLpwiK!p(B9s{s3mN>Wxc!U~r;Szpo>(ey|4&)B7pCquQzwjZBhGQNz67#@ z20@o2LntvRuVM5bIiz;~j|(>7i>1({yN94o4jW&0suLKl#?~QDGctAyNHB-842ko{ zGqOciAUpVFy}0ruMxg3VGb=7Zr3~9+jYeZumGR(aL!>|(B+sU)=>Y>ty6|eTcbL_F z+n`^i6Q?tJP}GZ_yuw)7PD|q#I>z&sC(s4GF_4wHff<1+4PJB3cVG7+rDdEhQe*d% zUL4b3i>;22f|+O4*j=L)tZpP!$xt8n9%&Z)iVj~oWSf6tg7BEMbV&$B4X&9= zb&!A1{37Tzy+E6!JS(=n2!_(btLt*FA$VwS)12E#aAr{6f8W@H@*?FbISpU&3=vKGcJM&7UkSp))> zeT|Osl+H*SF1cF_W`Sc3Ur4K&40<=g{Mw5=0MLH7NRWi73rmK88z_?%y)`a?m%J^`&t-dhF~>EsWrYI+MBqDDkr zTJq8_#6sS2Sz~_Ot}R}yKEcXpzT#Iw95hIFGx@n=M$0|;iJTTm|DXFIr=pT(Y5H<^ zUbcdaj7`pz4F7nWH=j3?OTN^g}cx;2OfhAikiTIlbE{@*dHb6 zmPpsk`DF|a^0cv%z#x*}mv_$qifg{sI;>>LO_G1z(?YOHwBYrYizWnMvy z^!;T%c;Inr6U8<=(6l{?{4kTpj0VWm56zODz)&lTUP{XKL9`YP18|!!hPTffolM>0 z_wLPsLQux$y_c^(A{s6EO4^Ax|wNc@Qb}5h{kE`b5g64K5}i17z`Z!QUS+m(XRJHnc%}hD9AFS1 zvVM*`eYk#+V-jrp8>H5AiDwgd>L&TMkGVA^rINu+Ef#YdEeHx~*lDu403O0CMQJr$ zviOaYGB7JPF{Swgy;dV^FYk`q51w;V3BjO;1GtRQsu2ke!nA9nje^arK4?c<(wjt{ zMs=Na7i%mzmz9;newdySU4MmIFFx{jw;e+1C+l38X+iPE;ZYTI8U4kmqPKQEr>QP+ zg4RIQ`x2ZEh7lQ4pT*iVG$$dYY(6#XN2zcl{0zF~ z>s70!s4G?psZo}tXSr^o(7fUbnTP4G%lzDPVY-mbH-e!GFYyYiw%U~W_ps-P9%cvg ztVk}8J3c4lm$Y8v*bMZ}<5+=zWI<2ncQ^J;sVGKj>^*-~sb= zCqmw5wdbi6F`J*bKRyIcy^B^_^ng7yw;N<;Av--L6JMw=GOQ_Ze@HSRK_3C%5ssBv z<-eOc(vvy3IIIvU!=};#G6|pmYw*TGu6abgRcu&<)k6ZdX$pkjHM|nIn}r^Ni2{2a zWa^*!3*_{R5-Y|5UdbSo`xR#x;I(FZb)9iCH`9083K(>^MiWaJHEUxtQLY!1VGFCD zl$uo#DHD8?1a1@|VuNad%?*CK*(pca7qo1=3MB@fM?dn3_zXFZ%xrH(m()FJxUZ$n zOPaGtmEqJMRddD@Q;{XKkwa29f&!YW>c4@il8Xi~n`>=Q4_9gHlyOWqCGJt{?0wTm zcXVpI00t?)6ci92oaHz(XPr#(z_i@o0e`GRDO-x!O%I}MK(6hrdQu}ELEzu3e?xci zHGQ#CR8!>n;>($|0a6P|dHE-?;ax06(_5X^SE(W z=QfBVS~P#0?S{1|M0ZDcxju-!PFr02fw-V--+gV_%t*HIw~x9=PVGLTR_{zIIgyNN zB_b;!`!hE~fL}cN8ghJr_r-YGAwT~}Fmmez7P#|83ooWZ^IfJ#yC3nc=?j}XtiBvU zb5mifrsJdSZ#VS0Td#xP2RcFsqQruOtCM<i$Vm-^bMhc zs2b-7x5^@H<`#rYO+IJ3nPPfHR0D|2H#Tv{Nbaq`ZV#4P0SsNT{5irPlS9!I28SGs z2zCj3=}0+VoEpb*V4t!a1CA%j^ODbUoF|-jpNcPh016G_zqEGAPCTDo_`GH+p>n7? 
zB4M|rrUy)5+mCXhUpZ4SV4r%uI+(bu~nE1SzA>pu#=b|Kd>X70ELD|&4 zLcC_3iIw4Sutge3&7`u0+1iQl>2Aq|=6q{_mJJ2(F1YJ%yc=1JOF9J&5ptup6&*uZ zbsl)kLDh+c#2Db0lqXa(oOoR6P<=zxr9YLrPV~`AE;0tQ@Iqu;{d)H!pz4PvH^XN_6Ai61LsdRRm)pCFEc zMe4G3!sYsOu-GPKJVS)+6=0J=>#ZiW++ET@c*biMog|>6$<&=IcRLbmGXvqU7z18G z^_CtQ!VG0@{_}9aUO_>6bYy;6gfQe%LGfFR8%aw6kZYC`1q!GD!lfw@kHNI<~um5j}9&(nF99Vtfr`CwvvdYH}j6-9kO@`qK zaU666EIbxh#Eo-13b{Sh*3bpLerG&q%>8v&q0+(Ci7_{Qre|p~E#=qNer_?x*z-sI zX<+OG6kLyZQ!o>d&A*8!Mruu<=#dXuPT%r2pEaVr3JtxO_fN~K_lIV;#`BU(woZK4 zNk9#Dy^awnWk3BY@0jEq79!Vv?xM!);H5h3C?N7fKH+y>w>wLyOW=P!l0yDCN%eZI zwrY7E)ncO5&r718)6y{}=MmGlW?R)c3wC#z;%d>)*{WTxU*94tYVhDJfq2#&XVP%L zPGPfd(BUCnKfpqTkp*=C^3Nd5vPN`pl+{Bl1w<)g$eV+;8iVh_Rm0OrIOZr?@k!`f7g3nhe#1Tyih>+FWvGx<6Iwba8v*4!cjF#;i z@M#l0TwKBT#kHNa!BO$kJ;TGRdDbDB-#@7uBl=8k{0=E`w7Kp|e3s?A#cr+$`hfC| zSzHm~vYFQr$bQ{w!df7X@;q3Zn2N1mSR{@=@=V^so79tl zQ?FGf+?29oZ@uC4uCrB+a4qJ(B}xg=`u+5^wU1W^g%!`s*^T6^tz|xWJoegz~Klbqa szbCDSd*`!UH%f5DJ^xSQ&@SJYucTO~g+l`OR{|f{!rr{{g6IAJ04Mq>`v3p{ literal 0 HcmV?d00001 diff --git a/frontend/src/pages/GeneralSettings/VectorDatabase/index.jsx b/frontend/src/pages/GeneralSettings/VectorDatabase/index.jsx index 9ef9cff2d..f49054b90 100644 --- a/frontend/src/pages/GeneralSettings/VectorDatabase/index.jsx +++ b/frontend/src/pages/GeneralSettings/VectorDatabase/index.jsx @@ -8,6 +8,7 @@ import PineconeLogo from "@/media/vectordbs/pinecone.png"; import LanceDbLogo from "@/media/vectordbs/lancedb.png"; import WeaviateLogo from "@/media/vectordbs/weaviate.png"; import QDrantLogo from "@/media/vectordbs/qdrant.png"; +import MilvusLogo from "@/media/vectordbs/milvus.png"; import PreLoader from "@/components/Preloader"; import ChangeWarningModal from "@/components/ChangeWarning"; import { MagnifyingGlass } from "@phosphor-icons/react"; @@ -17,6 +18,7 @@ import PineconeDBOptions from "@/components/VectorDBSelection/PineconeDBOptions" import QDrantDBOptions from "@/components/VectorDBSelection/QDrantDBOptions"; import WeaviateDBOptions from "@/components/VectorDBSelection/WeaviateDBOptions"; import VectorDBItem from "@/components/VectorDBSelection/VectorDBItem"; +import MilvusDBOptions from "@/components/VectorDBSelection/MilvusDBOptions"; export default function GeneralVectorDatabase() { const [saving, setSaving] = useState(false); @@ -79,6 +81,13 @@ export default function GeneralVectorDatabase() { description: "Open source local and cloud hosted multi-modal vector database.", }, + { + name: "Milvus", + value: "milvus", + logo: MilvusLogo, + options: , + description: "Open-source, highly scalable, and blazing fast.", + }, ]; const updateVectorChoice = (selection) => { diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx index d9fea4c62..281f1e8cd 100644 --- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx +++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx @@ -14,6 +14,7 @@ import PineconeLogo from "@/media/vectordbs/pinecone.png"; import LanceDbLogo from "@/media/vectordbs/lancedb.png"; import WeaviateLogo from "@/media/vectordbs/weaviate.png"; import QDrantLogo from "@/media/vectordbs/qdrant.png"; +import MilvusLogo from "@/media/vectordbs/milvus.png"; import React, { useState, useEffect } from "react"; import paths from "@/utils/paths"; import { useNavigate } from "react-router-dom"; @@ -123,6 +124,13 @@ const VECTOR_DB_PRIVACY = { ], logo: WeaviateLogo, }, + milvus: { + name: "Milvus", + description: [ + "Your vectors and document text are stored on 
your Milvus instance (cloud or self-hosted)", + ], + logo: MilvusLogo, + }, lancedb: { name: "LanceDB", description: [ diff --git a/frontend/src/pages/OnboardingFlow/Steps/VectorDatabaseConnection/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/VectorDatabaseConnection/index.jsx index f451fc3e2..37e0e5b73 100644 --- a/frontend/src/pages/OnboardingFlow/Steps/VectorDatabaseConnection/index.jsx +++ b/frontend/src/pages/OnboardingFlow/Steps/VectorDatabaseConnection/index.jsx @@ -5,6 +5,7 @@ import PineconeLogo from "@/media/vectordbs/pinecone.png"; import LanceDbLogo from "@/media/vectordbs/lancedb.png"; import WeaviateLogo from "@/media/vectordbs/weaviate.png"; import QDrantLogo from "@/media/vectordbs/qdrant.png"; +import MilvusLogo from "@/media/vectordbs/milvus.png"; import System from "@/models/system"; import paths from "@/utils/paths"; import PineconeDBOptions from "@/components/VectorDBSelection/PineconeDBOptions"; @@ -12,6 +13,7 @@ import ChromaDBOptions from "@/components/VectorDBSelection/ChromaDBOptions"; import QDrantDBOptions from "@/components/VectorDBSelection/QDrantDBOptions"; import WeaviateDBOptions from "@/components/VectorDBSelection/WeaviateDBOptions"; import LanceDBOptions from "@/components/VectorDBSelection/LanceDBOptions"; +import MilvusOptions from "@/components/VectorDBSelection/MilvusDBOptions"; import showToast from "@/utils/toast"; import { useNavigate } from "react-router-dom"; import VectorDBItem from "@/components/VectorDBSelection/VectorDBItem"; @@ -81,6 +83,13 @@ export default function VectorDatabaseConnection({ description: "Open source local and cloud hosted multi-modal vector database.", }, + { + name: "Milvus", + value: "milvus", + logo: MilvusLogo, + options: , + description: "Open-source, highly scalable, and blazing fast.", + }, ]; function handleForward() { diff --git a/server/.env.example b/server/.env.example index e41ab63d0..d060e0ab5 100644 --- a/server/.env.example +++ b/server/.env.example @@ -86,6 +86,11 @@ VECTOR_DB="lancedb" # QDRANT_ENDPOINT="http://localhost:6333" # QDRANT_API_KEY= +# Enable all below if you are using vector database: Milvus. +# VECTOR_DB="milvus" +# MILVUS_ADDRESS="http://localhost:19530" +# MILVUS_USERNAME= +# MILVUS_PASSWORD= # CLOUD DEPLOYMENT VARIRABLES ONLY # AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting. diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js index 29c2238ff..cd008d420 100644 --- a/server/models/systemSettings.js +++ b/server/models/systemSettings.js @@ -56,6 +56,13 @@ const SystemSettings = { QdrantApiKey: process.env.QDRANT_API_KEY, } : {}), + ...(vectorDB === "milvus" + ? { + MilvusAddress: process.env.MILVUS_ADDRESS, + MilvusUsername: process.env.MILVUS_USERNAME, + MilvusPassword: !!process.env.MILVUS_PASSWORD, + } + : {}), LLMProvider: llmProvider, ...(llmProvider === "openai" ? 
{ diff --git a/server/package.json b/server/package.json index 0e2d909c8..69cb790c3 100644 --- a/server/package.json +++ b/server/package.json @@ -28,6 +28,7 @@ "@prisma/client": "5.3.0", "@qdrant/js-client-rest": "^1.4.0", "@xenova/transformers": "^2.10.0", + "@zilliz/milvus2-sdk-node": "^2.3.5", "archiver": "^5.3.1", "bcrypt": "^5.1.0", "body-parser": "^1.20.2", @@ -77,4 +78,4 @@ "nodemon": "^2.0.22", "prettier": "^3.0.3" } -} \ No newline at end of file +} diff --git a/server/utils/EmbeddingEngines/azureOpenAi/index.js b/server/utils/EmbeddingEngines/azureOpenAi/index.js index e80b4b734..8cde1fc7c 100644 --- a/server/utils/EmbeddingEngines/azureOpenAi/index.js +++ b/server/utils/EmbeddingEngines/azureOpenAi/index.js @@ -13,6 +13,7 @@ class AzureOpenAiEmbedder { new AzureKeyCredential(process.env.AZURE_OPENAI_KEY) ); this.openai = openai; + this.dimensions = 1536; // Limit of how many strings we can process in a single pass to stay with resource or network limits // https://learn.microsoft.com/en-us/azure/ai-services/openai/faq#i-am-trying-to-use-embeddings-and-received-the-error--invalidrequesterror--too-many-inputs--the-max-number-of-inputs-is-1---how-do-i-fix-this-:~:text=consisting%20of%20up%20to%2016%20inputs%20per%20API%20request diff --git a/server/utils/EmbeddingEngines/localAi/index.js b/server/utils/EmbeddingEngines/localAi/index.js index 1480755d7..6f9d721b9 100644 --- a/server/utils/EmbeddingEngines/localAi/index.js +++ b/server/utils/EmbeddingEngines/localAi/index.js @@ -16,6 +16,10 @@ class LocalAiEmbedder { : {}), }); this.openai = new OpenAIApi(config); + // We don't know this for user's set model so for vectorDB integrations that requires dimensionality + // in schema, we will throw an error. + // Applies to QDrant and Milvus. + this.dimensions = null; // Limit of how many strings we can process in a single pass to stay with resource or network limits this.maxConcurrentChunks = 50; diff --git a/server/utils/EmbeddingEngines/native/index.js b/server/utils/EmbeddingEngines/native/index.js index 69e13a9e3..d2acde32a 100644 --- a/server/utils/EmbeddingEngines/native/index.js +++ b/server/utils/EmbeddingEngines/native/index.js @@ -12,6 +12,7 @@ class NativeEmbedder { : path.resolve(__dirname, `../../../storage/models`) ); this.modelPath = path.resolve(this.cacheDir, "Xenova", "all-MiniLM-L6-v2"); + this.dimensions = 384; // Limit of how many strings we can process in a single pass to stay with resource or network limits this.maxConcurrentChunks = 50; diff --git a/server/utils/EmbeddingEngines/openAi/index.js b/server/utils/EmbeddingEngines/openAi/index.js index 105be9d73..31f556e89 100644 --- a/server/utils/EmbeddingEngines/openAi/index.js +++ b/server/utils/EmbeddingEngines/openAi/index.js @@ -9,6 +9,7 @@ class OpenAiEmbedder { }); const openai = new OpenAIApi(config); this.openai = openai; + this.dimensions = 1536; // Limit of how many strings we can process in a single pass to stay with resource or network limits this.maxConcurrentChunks = 500; diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js index ac7029362..1685acc1a 100644 --- a/server/utils/helpers/index.js +++ b/server/utils/helpers/index.js @@ -16,6 +16,9 @@ function getVectorDbClass() { case "qdrant": const { QDrant } = require("../vectorDbProviders/qdrant"); return QDrant; + case "milvus": + const { Milvus } = require("../vectorDbProviders/milvus"); + return Milvus; default: throw new Error("ENV: No VECTOR_DB value found in environment!"); } diff --git a/server/utils/helpers/updateENV.js 
b/server/utils/helpers/updateENV.js index e6e97df5f..c699cf2df 100644 --- a/server/utils/helpers/updateENV.js +++ b/server/utils/helpers/updateENV.js @@ -170,6 +170,20 @@ const KEY_MAPPING = { checks: [], }, + // Milvus Options + MilvusAddress: { + envKey: "MILVUS_ADDRESS", + checks: [isValidURL, validDockerizedUrl], + }, + MilvusUsername: { + envKey: "MILVUS_USERNAME", + checks: [isNotEmpty], + }, + MilvusPassword: { + envKey: "MILVUS_PASSWORD", + checks: [isNotEmpty], + }, + // Together Ai Options TogetherAiApiKey: { envKey: "TOGETHER_AI_API_KEY", @@ -279,7 +293,14 @@ function supportedEmbeddingModel(input = "") { } function supportedVectorDB(input = "") { - const supported = ["chroma", "pinecone", "lancedb", "weaviate", "qdrant"]; + const supported = [ + "chroma", + "pinecone", + "lancedb", + "weaviate", + "qdrant", + "milvus", + ]; return supported.includes(input) ? null : `Invalid VectorDB type. Must be one of ${supported.join(", ")}.`; diff --git a/server/utils/vectorDbProviders/milvus/MILVUS_SETUP.md b/server/utils/vectorDbProviders/milvus/MILVUS_SETUP.md new file mode 100644 index 000000000..6bd9b8150 --- /dev/null +++ b/server/utils/vectorDbProviders/milvus/MILVUS_SETUP.md @@ -0,0 +1,40 @@ +# How to setup a local (or remote) Milvus Vector Database + +[Official Milvus Docs](https://milvus.io/docs/example_code.md) for reference. + +### How to get started + +**Requirements** + +Choose one of the following + +- Cloud + + - [Cloud account](https://cloud.zilliz.com/) + +- Local + - Docker + - `git` available in your CLI/terminal + +**Instructions** + +- Cloud + + - Create a Cluster on your cloud account + - Get connect Public Endpoint and Token + - Set .env.development variable in server + +- Local + - Download yaml file `wget https://github.com/milvus-io/milvus/releases/download/v2.3.4/milvus-standalone-docker-compose.yml -O docker-compose.yml` + - Start Milvus `sudo docker compose up -d` + - Check the containers are up and running `sudo docker compose ps` + - Get port number and set .env.development variable in server + +eg: `server/.env.development` + +``` +VECTOR_DB="milvus" +MILVUS_ADDRESS="http://localhost:19530" +MILVUS_USERNAME=minioadmin # Whatever your username and password are +MILVUS_PASSWORD=minioadmin +``` diff --git a/server/utils/vectorDbProviders/milvus/index.js b/server/utils/vectorDbProviders/milvus/index.js new file mode 100644 index 000000000..a9104784b --- /dev/null +++ b/server/utils/vectorDbProviders/milvus/index.js @@ -0,0 +1,360 @@ +const { + DataType, + MetricType, + IndexType, + MilvusClient, +} = require("@zilliz/milvus2-sdk-node"); +const { RecursiveCharacterTextSplitter } = require("langchain/text_splitter"); +const { v4: uuidv4 } = require("uuid"); +const { storeVectorResult, cachedVectorInformation } = require("../../files"); +const { + toChunks, + getLLMProvider, + getEmbeddingEngineSelection, +} = require("../../helpers"); + +const Milvus = { + name: "Milvus", + connect: async function () { + if (process.env.VECTOR_DB !== "milvus") + throw new Error("Milvus::Invalid ENV settings"); + + const client = new MilvusClient({ + address: process.env.MILVUS_ADDRESS, + username: process.env.MILVUS_USERNAME, + password: process.env.MILVUS_PASSWORD, + }); + + const { isHealthy } = await client.checkHealth(); + if (!isHealthy) + throw new Error( + "MilvusDB::Invalid Heartbeat received - is the instance online?" 
+ ); + + return { client }; + }, + heartbeat: async function () { + await this.connect(); + return { heartbeat: Number(new Date()) }; + }, + totalVectors: async function () { + const { client } = await this.connect(); + const { collection_names } = await client.listCollections(); + const total = collection_names.reduce(async (acc, collection_name) => { + const statistics = await client.getCollectionStatistics({ + collection_name, + }); + return Number(acc) + Number(statistics?.data?.row_count ?? 0); + }, 0); + return total; + }, + namespaceCount: async function (_namespace = null) { + const { client } = await this.connect(); + const statistics = await client.getCollectionStatistics({ + collection_name: _namespace, + }); + return Number(statistics?.data?.row_count ?? 0); + }, + namespace: async function (client, namespace = null) { + if (!namespace) throw new Error("No namespace value provided."); + const collection = await client + .getCollectionStatistics({ collection_name: namespace }) + .catch(() => null); + return collection; + }, + hasNamespace: async function (namespace = null) { + if (!namespace) return false; + const { client } = await this.connect(); + return await this.namespaceExists(client, namespace); + }, + namespaceExists: async function (client, namespace = null) { + if (!namespace) throw new Error("No namespace value provided."); + const { value } = await client + .hasCollection({ collection_name: namespace }) + .catch((e) => { + console.error("MilvusDB::namespaceExists", e.message); + return { value: false }; + }); + return value; + }, + deleteVectorsInNamespace: async function (client, namespace = null) { + await client.dropCollection({ collection_name: namespace }); + return true; + }, + getOrCreateCollection: async function (client, namespace) { + const isExists = await this.namespaceExists(client, namespace); + if (!isExists) { + const embedder = getEmbeddingEngineSelection(); + if (!embedder.dimensions) + throw new Error( + `Your embedder selection has unknown dimensions output. It should be defined when using ${this.name}. 
Open an issue on Github for support.` + ); + + await client.createCollection({ + collection_name: namespace, + fields: [ + { + name: "id", + description: "id", + data_type: DataType.VarChar, + max_length: 255, + is_primary_key: true, + }, + { + name: "vector", + description: "vector", + data_type: DataType.FloatVector, + dim: embedder.dimensions, + }, + { + name: "metadata", + decription: "metadata", + data_type: DataType.JSON, + }, + ], + }); + await client.createIndex({ + collection_name: namespace, + field_name: "vector", + index_type: IndexType.AUTOINDEX, + metric_type: MetricType.COSINE, + }); + await client.loadCollectionSync({ + collection_name: namespace, + }); + } + }, + addDocumentToNamespace: async function ( + namespace, + documentData = {}, + fullFilePath = null + ) { + const { DocumentVectors } = require("../../../models/vectors"); + try { + const { pageContent, docId, ...metadata } = documentData; + if (!pageContent || pageContent.length == 0) return false; + + console.log("Adding new vectorized document into namespace", namespace); + const cacheResult = await cachedVectorInformation(fullFilePath); + if (cacheResult.exists) { + const { client } = await this.connect(); + await this.getOrCreateCollection(client, namespace); + + const { chunks } = cacheResult; + const documentVectors = []; + + for (const chunk of chunks) { + // Before sending to Pinecone and saving the records to our db + // we need to assign the id of each chunk that is stored in the cached file. + const newChunks = chunk.map((chunk) => { + const id = uuidv4(); + documentVectors.push({ docId, vectorId: id }); + return { id, vector: chunk.values, metadata: chunk.metadata }; + }); + const insertResult = await client.insert({ + collection_name: namespace, + data: newChunks, + }); + + if (insertResult?.status.error_code !== "Success") { + throw new Error( + `Error embedding into Milvus! Reason:${insertResult?.status.reason}` + ); + } + } + await DocumentVectors.bulkInsert(documentVectors); + await client.flushSync({ collection_names: [namespace] }); + return true; + } + + const textSplitter = new RecursiveCharacterTextSplitter({ + chunkSize: + getEmbeddingEngineSelection()?.embeddingMaxChunkLength || 1_000, + chunkOverlap: 20, + }); + const textChunks = await textSplitter.splitText(pageContent); + + console.log("Chunks created from document:", textChunks.length); + const LLMConnector = getLLMProvider(); + const documentVectors = []; + const vectors = []; + const vectorValues = await LLMConnector.embedChunks(textChunks); + + if (!!vectorValues && vectorValues.length > 0) { + for (const [i, vector] of vectorValues.entries()) { + const vectorRecord = { + id: uuidv4(), + values: vector, + // [DO NOT REMOVE] + // LangChain will be unable to find your text if you embed manually and dont include the `text` key. + metadata: { ...metadata, text: textChunks[i] }, + }; + + vectors.push(vectorRecord); + documentVectors.push({ docId, vectorId: vectorRecord.id }); + } + } else { + throw new Error( + "Could not embed document chunks! This document will not be recorded." 
+ ); + } + + if (vectors.length > 0) { + const chunks = []; + const { client } = await this.connect(); + await this.getOrCreateCollection(client, namespace); + + console.log("Inserting vectorized chunks into Milvus."); + for (const chunk of toChunks(vectors, 100)) { + chunks.push(chunk); + const insertResult = await client.insert({ + collection_name: namespace, + data: chunk.map((item) => ({ + id: item.id, + vector: item.values, + metadata: chunk.metadata, + })), + }); + + if (insertResult?.status.error_code !== "Success") { + throw new Error( + `Error embedding into Milvus! Reason:${insertResult?.status.reason}` + ); + } + } + await storeVectorResult(chunks, fullFilePath); + await client.flushSync({ collection_names: [namespace] }); + } + + await DocumentVectors.bulkInsert(documentVectors); + return true; + } catch (e) { + console.error(e); + console.error("addDocumentToNamespace", e.message); + return false; + } + }, + deleteDocumentFromNamespace: async function (namespace, docId) { + const { DocumentVectors } = require("../../../models/vectors"); + const { client } = await this.connect(); + if (!(await this.namespaceExists(client, namespace))) return; + const knownDocuments = await DocumentVectors.where({ docId }); + if (knownDocuments.length === 0) return; + + const vectorIds = knownDocuments.map((doc) => doc.vectorId); + const queryIn = vectorIds.map((v) => `'${v}'`).join(","); + await client.deleteEntities({ + collection_name: namespace, + expr: `id in [${queryIn}]`, + }); + + const indexes = knownDocuments.map((doc) => doc.id); + await DocumentVectors.deleteIds(indexes); + + // Even after flushing Milvus can take some time to re-calc the count + // so all we can hope to do is flushSync so that the count can be correct + // on a later call. + await client.flushSync({ collection_names: [namespace] }); + return true; + }, + performSimilaritySearch: async function ({ + namespace = null, + input = "", + LLMConnector = null, + similarityThreshold = 0.25, + }) { + if (!namespace || !input || !LLMConnector) + throw new Error("Invalid request to performSimilaritySearch."); + + const { client } = await this.connect(); + if (!(await this.namespaceExists(client, namespace))) { + return { + contextTexts: [], + sources: [], + message: "Invalid query - no documents found for workspace!", + }; + } + + const queryVector = await LLMConnector.embedTextInput(input); + const { contextTexts, sourceDocuments } = await this.similarityResponse( + client, + namespace, + queryVector, + similarityThreshold + ); + + const sources = sourceDocuments.map((metadata, i) => { + return { ...metadata, text: contextTexts[i] }; + }); + return { + contextTexts, + sources: this.curateSources(sources), + message: false, + }; + }, + similarityResponse: async function ( + client, + namespace, + queryVector, + similarityThreshold = 0.25 + ) { + const result = { + contextTexts: [], + sourceDocuments: [], + scores: [], + }; + const response = await client.search({ + collection_name: namespace, + vectors: queryVector, + }); + response.results.forEach((match) => { + if (match.score < similarityThreshold) return; + result.contextTexts.push(match.metadata.text); + result.sourceDocuments.push(match); + result.scores.push(match.score); + }); + return result; + }, + "namespace-stats": async function (reqBody = {}) { + const { namespace = null } = reqBody; + if (!namespace) throw new Error("namespace required"); + const { client } = await this.connect(); + if (!(await this.namespaceExists(client, namespace))) + throw new 
Error("Namespace by that name does not exist."); + const stats = await this.namespace(client, namespace); + return stats + ? stats + : { message: "No stats were able to be fetched from DB for namespace" }; + }, + "delete-namespace": async function (reqBody = {}) { + const { namespace = null } = reqBody; + const { client } = await this.connect(); + if (!(await this.namespaceExists(client, namespace))) + throw new Error("Namespace by that name does not exist."); + + const statistics = await this.namespace(client, namespace); + await this.deleteVectorsInNamespace(client, namespace); + const vectorCount = Number(statistics?.data?.row_count ?? 0); + return { + message: `Namespace ${namespace} was deleted along with ${vectorCount} vectors.`, + }; + }, + curateSources: function (sources = []) { + const documents = []; + for (const source of sources) { + const { metadata = {} } = source; + if (Object.keys(metadata).length > 0) { + documents.push({ + ...metadata, + ...(source.hasOwnProperty("pageContent") + ? { text: source.pageContent } + : {}), + }); + } + } + + return documents; + }, +}; + +module.exports.Milvus = Milvus; diff --git a/server/utils/vectorDbProviders/qdrant/index.js b/server/utils/vectorDbProviders/qdrant/index.js index 49b25a3d6..ddc3408da 100644 --- a/server/utils/vectorDbProviders/qdrant/index.js +++ b/server/utils/vectorDbProviders/qdrant/index.js @@ -112,9 +112,15 @@ const QDrant = { if (await this.namespaceExists(client, namespace)) { return await client.getCollection(namespace); } + + const embedder = getEmbeddingEngineSelection(); + if (!embedder.dimensions) + throw new Error( + `Your embedder selection has unknown dimensions output. It should be defined when using ${this.name}. Open an issue on Github for support.` + ); await client.createCollection(namespace, { vectors: { - size: 1536, //TODO: Fixed to OpenAI models - when other embeddings exist make variable. 
+ size: embedder.dimensions, distance: "Cosine", }, }); diff --git a/server/yarn.lock b/server/yarn.lock index 6215bf01f..175a67947 100644 --- a/server/yarn.lock +++ b/server/yarn.lock @@ -160,6 +160,20 @@ "@azure/logger" "^1.0.3" tslib "^2.4.0" +"@colors/colors@1.6.0", "@colors/colors@^1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@colors/colors/-/colors-1.6.0.tgz#ec6cd237440700bc23ca23087f513c75508958b0" + integrity sha512-Ir+AOibqzrIsL6ajt3Rz3LskB7OiMVHqltZmspbW/TJuTVuyOMirVqAkjfY6JISiLHgyNqicAC8AyHHGzNd/dA== + +"@dabh/diagnostics@^2.0.2": + version "2.0.3" + resolved "https://registry.yarnpkg.com/@dabh/diagnostics/-/diagnostics-2.0.3.tgz#7f7e97ee9a725dffc7808d93668cc984e1dc477a" + integrity sha512-hrlQOIi7hAfzsMqlGSFyVucrx38O+j6wiGOf//H2ecvIEqYN4ADBSS2iLMh5UFyDunCNniUIPk/q3riFv45xRA== + dependencies: + colorspace "1.1.x" + enabled "2.0.x" + kuler "^2.0.0" + "@eslint-community/eslint-utils@^4.2.0": version "4.4.0" resolved "https://registry.yarnpkg.com/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz#a23514e8fb9af1269d5f7788aa556798d61c6b59" @@ -214,6 +228,35 @@ resolved "https://registry.yarnpkg.com/@graphql-typed-document-node/core/-/core-3.2.0.tgz#5f3d96ec6b2354ad6d8a28bf216a1d97b5426861" integrity sha512-mB9oAsNCm9aM3/SOv4YtBMqZbYj10R7dkq8byBqxGY/ncFwhf2oQzMV+LCRlWoDSEBJ3COiR1yeDvMtsoOsuFQ== +"@grpc/grpc-js@1.8.17": + version "1.8.17" + resolved "https://registry.yarnpkg.com/@grpc/grpc-js/-/grpc-js-1.8.17.tgz#a3a2f826fc033eae7d2f5ee41e0ab39cee948838" + integrity sha512-DGuSbtMFbaRsyffMf+VEkVu8HkSXEUfO3UyGJNtqxW9ABdtTIA+2UXAJpwbJS+xfQxuwqLUeELmL6FuZkOqPxw== + dependencies: + "@grpc/proto-loader" "^0.7.0" + "@types/node" ">=12.12.47" + +"@grpc/proto-loader@0.7.7": + version "0.7.7" + resolved "https://registry.yarnpkg.com/@grpc/proto-loader/-/proto-loader-0.7.7.tgz#d33677a77eea8407f7c66e2abd97589b60eb4b21" + integrity sha512-1TIeXOi8TuSCQprPItwoMymZXxWT0CPxUhkrkeCUH+D8U7QDwQ6b7SUz2MaLuWM2llT+J/TVFLmQI5KtML3BhQ== + dependencies: + "@types/long" "^4.0.1" + lodash.camelcase "^4.3.0" + long "^4.0.0" + protobufjs "^7.0.0" + yargs "^17.7.2" + +"@grpc/proto-loader@^0.7.0": + version "0.7.10" + resolved "https://registry.yarnpkg.com/@grpc/proto-loader/-/proto-loader-0.7.10.tgz#6bf26742b1b54d0a473067743da5d3189d06d720" + integrity sha512-CAqDfoaQ8ykFd9zqBDn4k6iWT9loLAlc2ETmDFS9JCD70gDcnA4L3AFEo2iV7KyAtAAHFW9ftq1Fz+Vsgq80RQ== + dependencies: + lodash.camelcase "^4.3.0" + long "^5.0.0" + protobufjs "^7.2.4" + yargs "^17.7.2" + "@hapi/hoek@^9.0.0": version "9.3.0" resolved "https://registry.yarnpkg.com/@hapi/hoek/-/hoek-9.3.0.tgz#8368869dcb735be2e7f5cb7647de78e167a251fb" @@ -755,6 +798,13 @@ resolved "https://registry.yarnpkg.com/@types/node/-/node-18.14.5.tgz#4a13a6445862159303fc38586598a9396fc408b3" integrity sha512-CRT4tMK/DHYhw1fcCEBwME9CSaZNclxfzVMe7GsO6ULSwsttbj70wSiX6rZdIjGblu93sTJxLdhNIT85KKI7Qw== +"@types/node@>=12.12.47": + version "20.10.8" + resolved "https://registry.yarnpkg.com/@types/node/-/node-20.10.8.tgz#f1e223cbde9e25696661d167a5b93a9b2a5d57c7" + integrity sha512-f8nQs3cLxbAFc00vEU59yf9UyGUftkPaLGfvbVOIDdx2i1b8epBqj2aNGyP19fiyXWvlmZ7qC1XLjAzw/OKIeA== + dependencies: + undici-types "~5.26.4" + "@types/node@>=13.7.0": version "20.10.3" resolved "https://registry.yarnpkg.com/@types/node/-/node-20.10.3.tgz#4900adcc7fc189d5af5bb41da8f543cea6962030" @@ -779,6 +829,11 @@ resolved "https://registry.yarnpkg.com/@types/retry/-/retry-0.12.0.tgz#2b35eccfcee7d38cd72ad99232fbd58bffb3c84d" integrity 
sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA== +"@types/triple-beam@^1.3.2": + version "1.3.5" + resolved "https://registry.yarnpkg.com/@types/triple-beam/-/triple-beam-1.3.5.tgz#74fef9ffbaa198eb8b588be029f38b00299caa2c" + integrity sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw== + "@types/uuid@^9.0.1": version "9.0.7" resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-9.0.7.tgz#b14cebc75455eeeb160d5fe23c2fcc0c64f724d8" @@ -806,6 +861,18 @@ optionalDependencies: onnxruntime-node "1.14.0" +"@zilliz/milvus2-sdk-node@^2.3.5": + version "2.3.5" + resolved "https://registry.yarnpkg.com/@zilliz/milvus2-sdk-node/-/milvus2-sdk-node-2.3.5.tgz#6540bc03ebb99ab35f63e4eca7a1fd3ede2cf38c" + integrity sha512-bWbQnhvu+7jZXoqI+qySycwph3vloy0LDV54TBY4wRmu6HhMlqIqyIiI8sQNeSJFs8M1jHg1PlmhE/dvckA1bA== + dependencies: + "@grpc/grpc-js" "1.8.17" + "@grpc/proto-loader" "0.7.7" + dayjs "^1.11.7" + lru-cache "^9.1.2" + protobufjs "7.2.4" + winston "^3.9.0" + abbrev@1: version "1.1.1" resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" @@ -1487,7 +1554,7 @@ cmake-js@^7.2.1: which "^2.0.2" yargs "^17.6.0" -color-convert@^1.9.0: +color-convert@^1.9.0, color-convert@^1.9.3: version "1.9.3" resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== @@ -1511,7 +1578,7 @@ color-name@^1.0.0, color-name@~1.1.4: resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== -color-string@^1.9.0: +color-string@^1.6.0, color-string@^1.9.0: version "1.9.1" resolved "https://registry.yarnpkg.com/color-string/-/color-string-1.9.1.tgz#4467f9146f036f855b764dfb5bf8582bf342c7a4" integrity sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg== @@ -1524,6 +1591,14 @@ color-support@^1.1.2, color-support@^1.1.3: resolved "https://registry.yarnpkg.com/color-support/-/color-support-1.1.3.tgz#93834379a1cc9a0c61f82f52f0d04322251bd5a2" integrity sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg== +color@^3.1.3: + version "3.2.1" + resolved "https://registry.yarnpkg.com/color/-/color-3.2.1.tgz#3544dc198caf4490c3ecc9a790b54fe9ff45e164" + integrity sha512-aBl7dZI9ENN6fUGC7mWpMTPNHmWUSNan9tuWN6ahh5ZLNk9baLJOnSMlrQkHcrfFgz2/RigjUVAjdx36VcemKA== + dependencies: + color-convert "^1.9.3" + color-string "^1.6.0" + color@^4.2.3: version "4.2.3" resolved "https://registry.yarnpkg.com/color/-/color-4.2.3.tgz#d781ecb5e57224ee43ea9627560107c0e0c6463a" @@ -1537,6 +1612,14 @@ colors@^1.4.0: resolved "https://registry.yarnpkg.com/colors/-/colors-1.4.0.tgz#c50491479d4c1bdaed2c9ced32cf7c7dc2360f78" integrity sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA== +colorspace@1.1.x: + version "1.1.4" + resolved "https://registry.yarnpkg.com/colorspace/-/colorspace-1.1.4.tgz#8d442d1186152f60453bf8070cd66eb364e59243" + integrity sha512-BgvKJiuVu1igBUF2kEjRCZXol6wiiGbY5ipL/oVPwm0BL9sIpMIzM8IK7vwuxIIzOXMV3Ey5w+vxhm0rR/TN8w== + dependencies: + color "^3.1.3" + text-hex "1.0.x" + combined-stream@^1.0.8: version "1.0.8" resolved 
"https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" @@ -1680,6 +1763,11 @@ crypt@0.0.2: resolved "https://registry.yarnpkg.com/crypt/-/crypt-0.0.2.tgz#88d7ff7ec0dfb86f713dc87bbb42d044d3e6c41b" integrity sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow== +dayjs@^1.11.7: + version "1.11.10" + resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.10.tgz#68acea85317a6e164457d6d6947564029a6a16a0" + integrity sha512-vjAczensTgRcqDERK0SR2XMwsF/tSvnvlv6VcF2GIhg6Sx4yOIt/irsr1RDJsKiIyBzJDpCoXiWWq28MqH2cnQ== + debug@2.6.9: version "2.6.9" resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" @@ -1835,6 +1923,11 @@ emoji-regex@^8.0.0: resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== +enabled@2.0.x: + version "2.0.0" + resolved "https://registry.yarnpkg.com/enabled/-/enabled-2.0.0.tgz#f9dd92ec2d6f4bbc0d5d1e64e21d61cd4665e7c2" + integrity sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ== + encode32@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/encode32/-/encode32-1.1.0.tgz#0c54b45fb314ad5502e3c230cb95acdc5e5cd1dd" @@ -2254,6 +2347,11 @@ fd-slicer@~1.1.0: dependencies: pend "~1.2.0" +fecha@^4.2.0: + version "4.2.3" + resolved "https://registry.yarnpkg.com/fecha/-/fecha-4.2.3.tgz#4d9ccdbc61e8629b259fdca67e65891448d569fd" + integrity sha512-OP2IUU6HeYKJi3i0z4A19kHMQoLVs4Hc+DPqqxI2h/DPZHTm/vjsfC6P0b4jCMy14XizLBqvndQ+UilD7707Jw== + file-entry-cache@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-6.0.1.tgz#211b2dd9659cb0394b073e7323ac3c933d522027" @@ -2339,6 +2437,11 @@ flow-remove-types@^2.217.1: pirates "^3.0.2" vlq "^0.2.1" +fn.name@1.x.x: + version "1.1.0" + resolved "https://registry.yarnpkg.com/fn.name/-/fn.name-1.1.0.tgz#26cad8017967aea8731bc42961d04a3d5988accc" + integrity sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw== + follow-redirects@^1.14.8, follow-redirects@^1.14.9: version "1.15.2" resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.2.tgz#b460864144ba63f2681096f274c4e57026da2c13" @@ -3344,6 +3447,11 @@ keyv@^4.5.3: dependencies: json-buffer "3.0.1" +kuler@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/kuler/-/kuler-2.0.0.tgz#e2c570a3800388fb44407e851531c1d670b061b3" + integrity sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A== + ky@^0.33.1: version "0.33.3" resolved "https://registry.yarnpkg.com/ky/-/ky-0.33.3.tgz#bf1ad322a3f2c3428c13cfa4b3af95e6c4a2f543" @@ -3500,11 +3608,28 @@ log-symbols@^5.1.0: chalk "^5.0.0" is-unicode-supported "^1.1.0" +logform@^2.3.2, logform@^2.4.0: + version "2.6.0" + resolved "https://registry.yarnpkg.com/logform/-/logform-2.6.0.tgz#8c82a983f05d6eaeb2d75e3decae7a768b2bf9b5" + integrity sha512-1ulHeNPp6k/LD8H91o7VYFBng5i1BDE7HoKxVbZiGFidS1Rj65qcywLxX+pVfAPoQJEjRdvKcusKwOupHCVOVQ== + dependencies: + "@colors/colors" "1.6.0" + "@types/triple-beam" "^1.3.2" + fecha "^4.2.0" + ms "^2.1.1" + safe-stable-stringify "^2.3.1" + triple-beam "^1.3.0" + long@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/long/-/long-4.0.0.tgz#9a7b71cfb7d361a194ea555241c92f7468d5bf28" integrity 
sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA== +long@^5.0.0: + version "5.2.3" + resolved "https://registry.yarnpkg.com/long/-/long-5.2.3.tgz#a3ba97f3877cf1d778eccbcb048525ebb77499e1" + integrity sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q== + loose-envify@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" @@ -3524,6 +3649,11 @@ lru-cache@^6.0.0: dependencies: yallist "^4.0.0" +lru-cache@^9.1.2: + version "9.1.2" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-9.1.2.tgz#255fdbc14b75589d6d0e73644ca167a8db506835" + integrity sha512-ERJq3FOzJTxBbFjZ7iDs+NiK4VI9Wz+RdrrAB8dio1oV+YvdPzUEE4QNiT2VD51DkIbCYRUUzCRkssXCHqSnKQ== + make-dir@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-3.1.0.tgz#415e967046b3a7f1d185277d84aa58203726a13f" @@ -4042,6 +4172,13 @@ once@^1.3.0, once@^1.3.1, once@^1.4.0: dependencies: wrappy "1" +one-time@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/one-time/-/one-time-1.0.0.tgz#e06bc174aed214ed58edede573b433bbf827cb45" + integrity sha512-5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g== + dependencies: + fn.name "1.x.x" + onetime@^5.1.0: version "5.1.2" resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.2.tgz#d0e96ebb56b07476df1dd9c4806e5237985ca45e" @@ -4334,6 +4471,24 @@ prop-types@^15.8.1: object-assign "^4.1.1" react-is "^16.13.1" +protobufjs@7.2.4: + version "7.2.4" + resolved "https://registry.yarnpkg.com/protobufjs/-/protobufjs-7.2.4.tgz#3fc1ec0cdc89dd91aef9ba6037ba07408485c3ae" + integrity sha512-AT+RJgD2sH8phPmCf7OUZR8xGdcJRga4+1cOaXJ64hvcSkVhNcRHOwIxUatPH15+nj59WAGTDv3LSGZPEQbJaQ== + dependencies: + "@protobufjs/aspromise" "^1.1.2" + "@protobufjs/base64" "^1.1.2" + "@protobufjs/codegen" "^2.0.4" + "@protobufjs/eventemitter" "^1.1.0" + "@protobufjs/fetch" "^1.1.0" + "@protobufjs/float" "^1.0.2" + "@protobufjs/inquire" "^1.1.0" + "@protobufjs/path" "^1.1.2" + "@protobufjs/pool" "^1.1.0" + "@protobufjs/utf8" "^1.1.0" + "@types/node" ">=13.7.0" + long "^5.0.0" + protobufjs@^6.8.8: version "6.11.4" resolved "https://registry.yarnpkg.com/protobufjs/-/protobufjs-6.11.4.tgz#29a412c38bf70d89e537b6d02d904a6f448173aa" @@ -4353,6 +4508,24 @@ protobufjs@^6.8.8: "@types/node" ">=13.7.0" long "^4.0.0" +protobufjs@^7.0.0, protobufjs@^7.2.4: + version "7.2.5" + resolved "https://registry.yarnpkg.com/protobufjs/-/protobufjs-7.2.5.tgz#45d5c57387a6d29a17aab6846dcc283f9b8e7f2d" + integrity sha512-gGXRSXvxQ7UiPgfw8gevrfRWcTlSbOFg+p/N+JVJEK5VhueL2miT6qTymqAmjr1Q5WbOCyJbyrk6JfWKwlFn6A== + dependencies: + "@protobufjs/aspromise" "^1.1.2" + "@protobufjs/base64" "^1.1.2" + "@protobufjs/codegen" "^2.0.4" + "@protobufjs/eventemitter" "^1.1.0" + "@protobufjs/fetch" "^1.1.0" + "@protobufjs/float" "^1.0.2" + "@protobufjs/inquire" "^1.1.0" + "@protobufjs/path" "^1.1.2" + "@protobufjs/pool" "^1.1.0" + "@protobufjs/utf8" "^1.1.0" + "@types/node" ">=13.7.0" + long "^5.0.0" + proxy-addr@~2.0.7: version "2.0.7" resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.7.tgz#f19fe69ceab311eeb94b42e70e8c2070f9ba1025" @@ -4605,6 +4778,11 @@ safe-regex-test@^1.0.0: get-intrinsic "^1.1.3" is-regex "^1.1.4" +safe-stable-stringify@^2.3.1: + version "2.4.3" + resolved "https://registry.yarnpkg.com/safe-stable-stringify/-/safe-stable-stringify-2.4.3.tgz#138c84b6f6edb3db5f8ef3ef7115b8f55ccbf886" 
+ integrity sha512-e2bDA2WJT0wxseVd4lsDP4+3ONX6HpMXQa1ZhFQ7SU+GjvORCmShbCMltrtIDfkYhVHrOcPtj+KhmDBdPdZD1g== + "safer-buffer@>= 2.1.2 < 3", "safer-buffer@>= 2.1.2 < 3.0.0": version "2.1.2" resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" @@ -4835,6 +5013,11 @@ ssri@^8.0.0, ssri@^8.0.1: dependencies: minipass "^3.1.1" +stack-trace@0.0.x: + version "0.0.10" + resolved "https://registry.yarnpkg.com/stack-trace/-/stack-trace-0.0.10.tgz#547c70b347e8d32b4e108ea1a2a159e5fdde19c0" + integrity sha512-KGzahc7puUKkzyMt+IqAep+TVNbKP+k2Lmwhub39m1AsTSkaDutx56aDCo+HLDzf/D26BIHTJWNiTG1KAJiQCg== + statuses@2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" @@ -5078,6 +5261,11 @@ tar@^6.0.2, tar@^6.1.11, tar@^6.1.2: mkdirp "^1.0.3" yallist "^4.0.0" +text-hex@1.0.x: + version "1.0.0" + resolved "https://registry.yarnpkg.com/text-hex/-/text-hex-1.0.0.tgz#69dc9c1b17446ee79a92bf5b884bb4b9127506f5" + integrity sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg== + text-table@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" @@ -5107,6 +5295,11 @@ tr46@~0.0.3: resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw== +triple-beam@^1.3.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/triple-beam/-/triple-beam-1.4.1.tgz#6fde70271dc6e5d73ca0c3b24e2d92afb7441984" + integrity sha512-aZbgViZrg1QNcG+LULa7nhZpJTZSLm/mXnHXnbAbjmN5aSa0y7V+wvv6+4WaBtpISJzThKy+PIPxc1Nq1EJ9mg== + tslib@^2.2.0, tslib@^2.4.0: version "2.6.1" resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.1.tgz#fd8c9a0ff42590b25703c0acb3de3d3f4ede0410" @@ -5448,6 +5641,32 @@ wide-align@^1.1.2, wide-align@^1.1.5: dependencies: string-width "^1.0.2 || 2 || 3 || 4" +winston-transport@^4.5.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/winston-transport/-/winston-transport-4.6.0.tgz#f1c1a665ad1b366df72199e27892721832a19e1b" + integrity sha512-wbBA9PbPAHxKiygo7ub7BYRiKxms0tpfU2ljtWzb3SjRjv5yl6Ozuy/TkXf00HTAt+Uylo3gSkNwzc4ME0wiIg== + dependencies: + logform "^2.3.2" + readable-stream "^3.6.0" + triple-beam "^1.3.0" + +winston@^3.9.0: + version "3.11.0" + resolved "https://registry.yarnpkg.com/winston/-/winston-3.11.0.tgz#2d50b0a695a2758bb1c95279f0a88e858163ed91" + integrity sha512-L3yR6/MzZAOl0DsysUXHVjOwv8mKZ71TrA/41EIduGpOOV5LQVodqN+QdQ6BS6PJ/RdIshZhq84P/fStEZkk7g== + dependencies: + "@colors/colors" "^1.6.0" + "@dabh/diagnostics" "^2.0.2" + async "^3.2.3" + is-stream "^2.0.0" + logform "^2.4.0" + one-time "^1.0.0" + readable-stream "^3.4.0" + safe-stable-stringify "^2.3.1" + stack-trace "0.0.x" + triple-beam "^1.3.0" + winston-transport "^4.5.0" + wordwrapjs@^4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/wordwrapjs/-/wordwrapjs-4.0.1.tgz#d9790bccfb110a0fc7836b5ebce0937b37a8b98f" From 315b92e1647aea790dfaa7389ffa40c43e9407e8 Mon Sep 17 00:00:00 2001 From: Ahren Stevens-Taylor Date: Fri, 12 Jan 2024 21:31:59 +0000 Subject: [PATCH 21/41] 572 add docker tags (#581) * [FEAT]: Docker Tags specific to a build version #572 * fix: dockerhub repo name * feat: add Docker build caches * fix: docker username Fix the DockerHub repository owner name --- .github/workflows/build-and-push-image.yaml | 
35 ++++++++++++++++----- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build-and-push-image.yaml b/.github/workflows/build-and-push-image.yaml index 17ca5a975..03318320d 100644 --- a/.github/workflows/build-and-push-image.yaml +++ b/.github/workflows/build-and-push-image.yaml @@ -36,6 +36,19 @@ jobs: shell: bash run: echo "repo=${GITHUB_REPOSITORY,,}" >> $GITHUB_OUTPUT id: lowercase_repo + + - name: Check if DockerHub build needed + shell: bash + run: | + # Check if the secret for USERNAME is set (don't even check for the password) + if [[ -z "${{ secrets.DOCKER_USERNAME }}" ]]; then + echo "DockerHub build not needed" + echo "enabled=false" >> $GITHUB_OUTPUT + else + echo "DockerHub build needed" + echo "enabled=true" >> $GITHUB_OUTPUT + fi + id: dockerhub - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -45,6 +58,8 @@ jobs: - name: Log in to Docker Hub uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a + # Only login to the Docker Hub if the repo is mintplex/anythingllm, to allow for forks to build on GHCR + if: steps.dockerhub.outputs.enabled == 'true' with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} @@ -61,9 +76,16 @@ jobs: uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 with: images: | - mintplexlabs/anythingllm + ${{ steps.dockerhub.outputs.enabled == 'true' && 'mintplexlabs/anythingllm' || '' }} ghcr.io/${{ github.repository }} - + tags: | + type=raw,value=latest,enable={{is_default_branch}} + type=sha + type=ref,event=branch + type=ref,event=tag + type=ref,event=pr + + - name: Build and push multi-platform Docker image uses: docker/build-push-action@v5 with: @@ -71,8 +93,7 @@ jobs: file: ./docker/Dockerfile push: true platforms: linux/amd64,linux/arm64 - tags: | - ${{ steps.meta.outputs.tags }} - ${{ github.ref_name == 'master' && 'mintplexlabs/anythingllm:latest' || '' }} - ${{ github.ref_name == 'master' && format('ghcr.io/{0}:{1}', steps.lowercase_repo.outputs.repo, 'latest') || '' }} - labels: ${{ steps.meta.outputs.labels }} \ No newline at end of file + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max From e5826d8c24c833a8ac560ed160a52b9de314f587 Mon Sep 17 00:00:00 2001 From: timothycarambat Date: Fri, 12 Jan 2024 13:33:11 -0800 Subject: [PATCH 22/41] remove unneeded build step --- .github/workflows/build-and-push-image.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/workflows/build-and-push-image.yaml b/.github/workflows/build-and-push-image.yaml index 03318320d..b6f2b3be5 100644 --- a/.github/workflows/build-and-push-image.yaml +++ b/.github/workflows/build-and-push-image.yaml @@ -32,11 +32,6 @@ jobs: - name: Check out the repo uses: actions/checkout@v4 - - name: Parse repository name to lowercase - shell: bash - run: echo "repo=${GITHUB_REPOSITORY,,}" >> $GITHUB_OUTPUT - id: lowercase_repo - - name: Check if DockerHub build needed shell: bash run: | From b4ab0a5e8c889e7dd5e3b83b3614d1bb916ad6b4 Mon Sep 17 00:00:00 2001 From: timothycarambat Date: Fri, 12 Jan 2024 14:22:19 -0800 Subject: [PATCH 23/41] nopub sha --- .github/workflows/build-and-push-image.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/build-and-push-image.yaml b/.github/workflows/build-and-push-image.yaml index b6f2b3be5..12b274b75 100644 --- a/.github/workflows/build-and-push-image.yaml +++ b/.github/workflows/build-and-push-image.yaml @@ -75,7 +75,6 @@ jobs: 
ghcr.io/${{ github.repository }} tags: | type=raw,value=latest,enable={{is_default_branch}} - type=sha type=ref,event=branch type=ref,event=tag type=ref,event=pr From 4f6d93159f686500c5e4c5ab60256496de1a18e6 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Sat, 13 Jan 2024 00:32:43 -0800 Subject: [PATCH 24/41] improve native embedder handling of large files (#584) * improve native embedder handling of large files * perf changes * ignore storage tmp --- .../cloudformation/aws_https_instructions.md | 8 ++- server/.gitignore | 1 + server/package.json | 4 +- server/utils/EmbeddingEngines/native/index.js | 65 +++++++++++++++++-- server/yarn.lock | 14 ++-- 5 files changed, 78 insertions(+), 14 deletions(-) diff --git a/cloud-deployments/aws/cloudformation/aws_https_instructions.md b/cloud-deployments/aws/cloudformation/aws_https_instructions.md index 5eb3cc753..39591820b 100644 --- a/cloud-deployments/aws/cloudformation/aws_https_instructions.md +++ b/cloud-deployments/aws/cloudformation/aws_https_instructions.md @@ -64,8 +64,14 @@ server { listen 80; server_name [insert FQDN here]; location / { + # Prevent timeouts on long-running requests. + proxy_connect_timeout 605; + proxy_send_timeout 605; + proxy_read_timeout 605; + send_timeout 605; + keepalive_timeout 605; proxy_pass http://0.0.0.0:3001; - } + } } 3. Enter ':wq' to save the changes to the anything config file diff --git a/server/.gitignore b/server/.gitignore index be4af591d..0913f9663 100644 --- a/server/.gitignore +++ b/server/.gitignore @@ -3,6 +3,7 @@ storage/assets/* !storage/assets/anything-llm.png storage/documents/* +storage/tmp/* storage/vector-cache/*.json storage/exports storage/imports diff --git a/server/package.json b/server/package.json index 69cb790c3..9761125a4 100644 --- a/server/package.json +++ b/server/package.json @@ -27,7 +27,7 @@ "@pinecone-database/pinecone": "^0.1.6", "@prisma/client": "5.3.0", "@qdrant/js-client-rest": "^1.4.0", - "@xenova/transformers": "^2.10.0", + "@xenova/transformers": "^2.14.0", "@zilliz/milvus2-sdk-node": "^2.3.5", "archiver": "^5.3.1", "bcrypt": "^5.1.0", @@ -78,4 +78,4 @@ "nodemon": "^2.0.22", "prettier": "^3.0.3" } -} +} \ No newline at end of file diff --git a/server/utils/EmbeddingEngines/native/index.js b/server/utils/EmbeddingEngines/native/index.js index d2acde32a..789e51fe9 100644 --- a/server/utils/EmbeddingEngines/native/index.js +++ b/server/utils/EmbeddingEngines/native/index.js @@ -1,6 +1,7 @@ const path = require("path"); const fs = require("fs"); const { toChunks } = require("../../helpers"); +const { v4 } = require("uuid"); class NativeEmbedder { constructor() { @@ -15,13 +16,30 @@ class NativeEmbedder { this.dimensions = 384; // Limit of how many strings we can process in a single pass to stay with resource or network limits - this.maxConcurrentChunks = 50; + this.maxConcurrentChunks = 25; this.embeddingMaxChunkLength = 1_000; // Make directory when it does not exist in existing installations if (!fs.existsSync(this.cacheDir)) fs.mkdirSync(this.cacheDir); } + #tempfilePath() { + const filename = `${v4()}.tmp`; + const tmpPath = process.env.STORAGE_DIR + ? 
path.resolve(process.env.STORAGE_DIR, "tmp") + : path.resolve(__dirname, `../../../storage/tmp`); + if (!fs.existsSync(tmpPath)) fs.mkdirSync(tmpPath, { recursive: true }); + return path.resolve(tmpPath, filename); + } + + async #writeToTempfile(filePath, data) { + try { + await fs.promises.appendFile(filePath, data, { encoding: "utf8" }); + } catch (e) { + console.error(`Error writing to tempfile: ${e}`); + } + } + async embedderClient() { if (!fs.existsSync(this.modelPath)) { console.log( @@ -62,18 +80,51 @@ class NativeEmbedder { return result?.[0] || []; } + // If you are thinking you want to edit this function - you probably don't. + // This process was benchmarked heavily on a t3.small (2GB RAM 1vCPU) + // and without careful memory management for the V8 garbage collector + // this function will likely result in an OOM on any resource-constrained deployment. + // To help manage very large documents we run a concurrent write-log each iteration + // to keep the embedding result out of memory. The `maxConcurrentChunk` is set to 25, + // as 50 seems to overflow no matter what. Given the above, memory use hovers around ~30% + // during a very large document (>100K words) but can spike up to 70% before gc. + // This seems repeatable for all document sizes. + // While this does take a while, it is zero set up and is 100% free and on-instance. async embedChunks(textChunks = []) { - const Embedder = await this.embedderClient(); - const embeddingResults = []; - for (const chunk of toChunks(textChunks, this.maxConcurrentChunks)) { - const output = await Embedder(chunk, { + const tmpFilePath = this.#tempfilePath(); + const chunks = toChunks(textChunks, this.maxConcurrentChunks); + const chunkLen = chunks.length; + + for (let [idx, chunk] of chunks.entries()) { + if (idx === 0) await this.#writeToTempfile(tmpFilePath, "["); + let data; + let pipeline = await this.embedderClient(); + let output = await pipeline(chunk, { pooling: "mean", normalize: true, }); - if (output.length === 0) continue; - embeddingResults.push(output.tolist()); + + if (output.length === 0) { + pipeline = null; + output = null; + data = null; + continue; + } + + data = JSON.stringify(output.tolist()); + await this.#writeToTempfile(tmpFilePath, data); + console.log(`\x1b[34m[Embedded Chunk ${idx + 1} of ${chunkLen}]\x1b[0m`); + if (chunkLen - 1 !== idx) await this.#writeToTempfile(tmpFilePath, ","); + if (chunkLen - 1 === idx) await this.#writeToTempfile(tmpFilePath, "]"); + pipeline = null; + output = null; + data = null; } + const embeddingResults = JSON.parse( + fs.readFileSync(tmpFilePath, { encoding: "utf-8" }) + ); + fs.rmSync(tmpFilePath, { force: true }); return embeddingResults.length > 0 ? 
embeddingResults.flat() : null; } } diff --git a/server/yarn.lock b/server/yarn.lock index 175a67947..cc129dfe9 100644 --- a/server/yarn.lock +++ b/server/yarn.lock @@ -269,6 +269,11 @@ dependencies: "@hapi/hoek" "^9.0.0" +"@huggingface/jinja@^0.1.0": + version "0.1.2" + resolved "https://registry.yarnpkg.com/@huggingface/jinja/-/jinja-0.1.2.tgz#073fa0a68ef481a1806b0186bbafd8013e586fbe" + integrity sha512-x5mpbfJt1nKmVep5WNP5VjNsjWApWNj8pPYI+uYMkBWH9bWUJmQmHt2lbf0VCoQd54Oq3XuFEh/UyoVh7rPxmg== + "@humanwhocodes/config-array@^0.11.13": version "0.11.13" resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.13.tgz#075dc9684f40a531d9b26b0822153c1e832ee297" @@ -851,11 +856,12 @@ resolved "https://registry.yarnpkg.com/@ungap/structured-clone/-/structured-clone-1.2.0.tgz#756641adb587851b5ccb3e095daf27ae581c8406" integrity sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ== -"@xenova/transformers@^2.10.0": - version "2.10.0" - resolved "https://registry.yarnpkg.com/@xenova/transformers/-/transformers-2.10.0.tgz#ae97d724a3addf78de7314336a9f7b28ed96a140" - integrity sha512-Al9WKiOsimAC3mU9Ef434GkHF0izmeAM7mMMx5npdWsWLAYL8fmJXCrULj6uCfjomMQ7jyN9rDtKpp570hffiw== +"@xenova/transformers@^2.14.0": + version "2.14.0" + resolved "https://registry.yarnpkg.com/@xenova/transformers/-/transformers-2.14.0.tgz#6fe128957e64377ca4fca910e77f6092f3f3512a" + integrity sha512-rQ3O7SW5EM64b6XFZGx3XQ2cfiroefxUwU9ShfSpEZyhd082GvwNJJKndxgaukse1hZP1JUDoT0DfjDiq4IZiw== dependencies: + "@huggingface/jinja" "^0.1.0" onnxruntime-web "1.14.0" sharp "^0.32.0" optionalDependencies: From e150e99e453a310e49bc1340ed450376af8dbb64 Mon Sep 17 00:00:00 2001 From: timothycarambat Date: Sun, 14 Jan 2024 11:26:08 -0800 Subject: [PATCH 25/41] bump onboard ai link --- .github/ISSUE_TEMPLATE/01_bug.yml | 2 +- .github/ISSUE_TEMPLATE/02_feature.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/01_bug.yml b/.github/ISSUE_TEMPLATE/01_bug.yml index d1ca5aba9..02ff8a442 100644 --- a/.github/ISSUE_TEMPLATE/01_bug.yml +++ b/.github/ISSUE_TEMPLATE/01_bug.yml @@ -8,7 +8,7 @@ body: value: | Use this template to file a bug report for AnythingLLM. Please be as descriptive as possible to allow everyone to replicate and solve your issue. - Want help contributing a PR? Use our repo chatbot by OnboardAI! https://app.getonboardai.com/chat/github/mintplex-labs/anything-llm + Want help contributing a PR? Use our repo chatbot by OnboardAI! https://learnthisrepo.com/anythingllm - type: dropdown id: runtime diff --git a/.github/ISSUE_TEMPLATE/02_feature.yml b/.github/ISSUE_TEMPLATE/02_feature.yml index 0fd29cbfb..ab2be3abd 100644 --- a/.github/ISSUE_TEMPLATE/02_feature.yml +++ b/.github/ISSUE_TEMPLATE/02_feature.yml @@ -9,7 +9,7 @@ body: Share a new idea for a feature or improvement. Be sure to search existing issues first to avoid duplicates. - Want help contributing a PR? Use our repo chatbot by OnboardAI! https://app.getonboardai.com/chat/github/mintplex-labs/anything-llm + Want help contributing a PR? Use our repo chatbot by OnboardAI! 
https://learnthisrepo.com/anythingllm - type: textarea From 026849df0224b6a8754f4103530bc015874def62 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Sun, 14 Jan 2024 16:36:17 -0800 Subject: [PATCH 26/41] normalize paths for submit URLs of `remove-documents (#598) normalize paths for submit URLs --- server/endpoints/system.js | 6 ++-- server/utils/files/index.js | 56 +++++++++++++---------------- server/utils/files/purgeDocument.js | 7 ++-- 3 files changed, 29 insertions(+), 40 deletions(-) diff --git a/server/endpoints/system.js b/server/endpoints/system.js index 345bd230a..6d985065c 100644 --- a/server/endpoints/system.js +++ b/server/endpoints/system.js @@ -1,7 +1,7 @@ process.env.NODE_ENV === "development" ? require("dotenv").config({ path: `.env.${process.env.NODE_ENV}` }) : require("dotenv").config(); -const { viewLocalFiles } = require("../utils/files"); +const { viewLocalFiles, normalizePath } = require("../utils/files"); const { exportData, unpackAndOverwriteImport } = require("../utils/files/data"); const { checkProcessorAlive, @@ -401,9 +401,7 @@ function systemEndpoints(app) { app.get("/system/data-exports/:filename", (request, response) => { const exportLocation = __dirname + "/../storage/exports/"; - const sanitized = path - .normalize(request.params.filename) - .replace(/^(\.\.(\/|\\|$))+/, ""); + const sanitized = normalizePath(request.params.filename); const finalDestination = path.join(exportLocation, sanitized); if (!fs.existsSync(finalDestination)) { diff --git a/server/utils/files/index.js b/server/utils/files/index.js index b6c7a3070..2ff1d60cc 100644 --- a/server/utils/files/index.js +++ b/server/utils/files/index.js @@ -2,32 +2,6 @@ const fs = require("fs"); const path = require("path"); const { v5: uuidv5 } = require("uuid"); -async function collectDocumentData(folderName = null) { - if (!folderName) throw new Error("No docPath provided in request"); - const folder = - process.env.NODE_ENV === "development" - ? path.resolve(__dirname, `../../storage/documents/${folderName}`) - : path.resolve(process.env.STORAGE_DIR, `documents/${folderName}`); - - const dirExists = fs.existsSync(folder); - if (!dirExists) - throw new Error( - `No documents folder for ${folderName} - did you run collector/main.py for this element?` - ); - - const files = fs.readdirSync(folder); - const fileData = []; - files.forEach((file) => { - if (path.extname(file) === ".json") { - const filePath = path.join(folder, file); - const data = fs.readFileSync(filePath, "utf8"); - console.log(`Parsing document: ${file}`); - fileData.push(JSON.parse(data)); - } - }); - return fileData; -} - // Should take in a folder that is a subfolder of documents // eg: youtube-subject/video-123.json async function fileData(filePath = null) { @@ -35,8 +9,15 @@ async function fileData(filePath = null) { const fullPath = process.env.NODE_ENV === "development" - ? path.resolve(__dirname, `../../storage/documents/${filePath}`) - : path.resolve(process.env.STORAGE_DIR, `documents/${filePath}`); + ? 
path.resolve( + __dirname, + `../../storage/documents/${normalizePath(filePath)}` + ) + : path.resolve( + process.env.STORAGE_DIR, + `documents/${normalizePath(filePath)}` + ); + const fileExists = fs.existsSync(fullPath); if (!fileExists) return null; @@ -142,11 +123,18 @@ async function storeVectorResult(vectorData = [], filename = null) { async function purgeSourceDocument(filename = null) { if (!filename) return; console.log(`Purging source document of ${filename}.`); - const filePath = process.env.NODE_ENV === "development" - ? path.resolve(__dirname, `../../storage/documents`, filename) - : path.resolve(process.env.STORAGE_DIR, `documents`, filename); + ? path.resolve( + __dirname, + `../../storage/documents`, + normalizePath(filename) + ) + : path.resolve( + process.env.STORAGE_DIR, + `documents`, + normalizePath(filename) + ); if (!fs.existsSync(filePath)) return; fs.rmSync(filePath); @@ -169,12 +157,16 @@ async function purgeVectorCache(filename = null) { return; } +function normalizePath(filepath = "") { + return path.normalize(filepath).replace(/^(\.\.(\/|\\|$))+/, ""); +} + module.exports = { cachedVectorInformation, - collectDocumentData, viewLocalFiles, purgeSourceDocument, purgeVectorCache, storeVectorResult, fileData, + normalizePath, }; diff --git a/server/utils/files/purgeDocument.js b/server/utils/files/purgeDocument.js index 27fe14710..46e9d37da 100644 --- a/server/utils/files/purgeDocument.js +++ b/server/utils/files/purgeDocument.js @@ -1,7 +1,6 @@ const fs = require("fs"); const path = require("path"); - -const { purgeVectorCache, purgeSourceDocument } = require("."); +const { purgeVectorCache, purgeSourceDocument, normalizePath } = require("."); const { Document } = require("../../models/documents"); const { Workspace } = require("../../models/workspace"); @@ -22,10 +21,10 @@ async function purgeFolder(folderName) { ? 
path.resolve(__dirname, `../../storage/documents`) : path.resolve(process.env.STORAGE_DIR, `documents`); - const folderPath = path.resolve(documentsFolder, folderName); + const folderPath = path.resolve(documentsFolder, normalizePath(folderName)); const filenames = fs .readdirSync(folderPath) - .map((file) => path.join(folderName, file)); + .map((file) => path.join(folderPath, file)); const workspaces = await Workspace.where(); const purgePromises = []; From e1dcd5ded010b03abd6aa32d1bf0668a48e38e17 Mon Sep 17 00:00:00 2001 From: timothycarambat Date: Sun, 14 Jan 2024 16:53:44 -0800 Subject: [PATCH 27/41] Normalize pfp path to prevent traversal --- server/endpoints/system.js | 5 +++-- server/utils/files/pfp.js | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/server/endpoints/system.js b/server/endpoints/system.js index 6d985065c..39b77a6a1 100644 --- a/server/endpoints/system.js +++ b/server/endpoints/system.js @@ -502,7 +502,8 @@ function systemEndpoints(app) { } const userRecord = await User.get({ id: user.id }); - const oldPfpFilename = userRecord.pfpFilename; + const oldPfpFilename = normalizePath(userRecord.pfpFilename); + console.log("oldPfpFilename", oldPfpFilename); if (oldPfpFilename) { const oldPfpPath = path.join( @@ -536,7 +537,7 @@ function systemEndpoints(app) { try { const user = await userFromSession(request, response); const userRecord = await User.get({ id: user.id }); - const oldPfpFilename = userRecord.pfpFilename; + const oldPfpFilename = normalizePath(userRecord.pfpFilename); console.log("oldPfpFilename", oldPfpFilename); if (oldPfpFilename) { const oldPfpPath = path.join( diff --git a/server/utils/files/pfp.js b/server/utils/files/pfp.js index 943aa595f..dd6ba0fe2 100644 --- a/server/utils/files/pfp.js +++ b/server/utils/files/pfp.js @@ -2,6 +2,7 @@ const path = require("path"); const fs = require("fs"); const { getType } = require("mime"); const { User } = require("../../models/user"); +const { normalizePath } = require("."); function fetchPfp(pfpPath) { if (!fs.existsSync(pfpPath)) { @@ -32,8 +33,7 @@ async function determinePfpFilepath(id) { const basePath = process.env.STORAGE_DIR ? 
path.join(process.env.STORAGE_DIR, "assets/pfp") : path.join(__dirname, "../../storage/assets/pfp"); - const pfpFilepath = path.join(basePath, pfpFilename); - + const pfpFilepath = path.join(basePath, normalizePath(pfpFilename)); if (!fs.existsSync(pfpFilepath)) return null; return pfpFilepath; } From 7aaa4b38e7112a6cd879c1238310c56b1844c6d8 Mon Sep 17 00:00:00 2001 From: timothycarambat Date: Sun, 14 Jan 2024 17:10:49 -0800 Subject: [PATCH 28/41] add flex role to export endpoint --- server/endpoints/system.js | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/server/endpoints/system.js b/server/endpoints/system.js index 39b77a6a1..15db895ad 100644 --- a/server/endpoints/system.js +++ b/server/endpoints/system.js @@ -389,15 +389,19 @@ function systemEndpoints(app) { } }); - app.get("/system/data-export", [validatedRequest], async (_, response) => { - try { - const { filename, error } = await exportData(); - response.status(200).json({ filename, error }); - } catch (e) { - console.log(e.message, e); - response.sendStatus(500).end(); + app.get( + "/system/data-export", + [validatedRequest, flexUserRoleValid], + async (_, response) => { + try { + const { filename, error } = await exportData(); + response.status(200).json({ filename, error }); + } catch (e) { + console.log(e.message, e); + response.sendStatus(500).end(); + } } - }); + ); app.get("/system/data-exports/:filename", (request, response) => { const exportLocation = __dirname + "/../storage/exports/"; From bd158ce7b1c5e9d09f568a05c277899755fbab47 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Tue, 16 Jan 2024 09:32:51 -0800 Subject: [PATCH 29/41] [Feat] Query mode to return no-result when no context found (#601) * Query mode to return no-result when no context found * update default error for sync chat * remove unnecessary type conversion --- .../WorkspaceChat/ChatContainer/index.jsx | 15 ---- frontend/src/models/workspace.js | 16 ---- server/endpoints/api/workspace/index.js | 11 +-- server/endpoints/chat.js | 80 ------------------- server/swagger/openapi.json | 2 +- server/utils/chats/index.js | 26 ++++++ server/utils/chats/stream.js | 28 +++++++ 7 files changed, 61 insertions(+), 117 deletions(-) diff --git a/frontend/src/components/WorkspaceChat/ChatContainer/index.jsx b/frontend/src/components/WorkspaceChat/ChatContainer/index.jsx index 6dd1cdf50..372c79a7c 100644 --- a/frontend/src/components/WorkspaceChat/ChatContainer/index.jsx +++ b/frontend/src/components/WorkspaceChat/ChatContainer/index.jsx @@ -71,21 +71,6 @@ export default function ChatContainer({ workspace, knownHistory = [] }) { return false; } - // TODO: Delete this snippet once we have streaming stable. - // const chatResult = await Workspace.sendChat( - // workspace, - // promptMessage.userMessage, - // window.localStorage.getItem(`workspace_chat_mode_${workspace.slug}`) ?? - // "chat", - // ) - // handleChat( - // chatResult, - // setLoadingResponse, - // setChatHistory, - // remHistory, - // _chatHistory - // ) - await Workspace.streamChat( workspace, promptMessage.userMessage, diff --git a/frontend/src/models/workspace.js b/frontend/src/models/workspace.js index aa0b9f744..d015918d4 100644 --- a/frontend/src/models/workspace.js +++ b/frontend/src/models/workspace.js @@ -168,22 +168,6 @@ const Workspace = { const data = await response.json(); return { response, data }; }, - - // TODO: Deprecated and should be removed from frontend. 
- sendChat: async function ({ slug }, message, mode = "query") { - const chatResult = await fetch(`${API_BASE}/workspace/${slug}/chat`, { - method: "POST", - body: JSON.stringify({ message, mode }), - headers: baseHeaders(), - }) - .then((res) => res.json()) - .catch((e) => { - console.error(e); - return null; - }); - - return chatResult; - }, }; export default Workspace; diff --git a/server/endpoints/api/workspace/index.js b/server/endpoints/api/workspace/index.js index 032fe41c3..ffead3adb 100644 --- a/server/endpoints/api/workspace/index.js +++ b/server/endpoints/api/workspace/index.js @@ -196,10 +196,11 @@ function apiWorkspaceEndpoints(app) { return; } - await WorkspaceChats.delete({ workspaceId: Number(workspace.id) }); - await DocumentVectors.deleteForWorkspace(Number(workspace.id)); - await Document.delete({ workspaceId: Number(workspace.id) }); - await Workspace.delete({ id: Number(workspace.id) }); + const workspaceId = Number(workspace.id); + await WorkspaceChats.delete({ workspaceId: workspaceId }); + await DocumentVectors.deleteForWorkspace(workspaceId); + await Document.delete({ workspaceId: workspaceId }); + await Workspace.delete({ id: workspaceId }); try { await VectorDb["delete-namespace"]({ namespace: slug }); } catch (e) { @@ -441,7 +442,7 @@ function apiWorkspaceEndpoints(app) { #swagger.tags = ['Workspaces'] #swagger.description = 'Execute a chat with a workspace' #swagger.requestBody = { - description: 'prompt to send to the workspace and the type of conversation (query or chat).', + description: 'Send a prompt to the workspace and the type of conversation (query or chat).
Query: Will not use LLM unless there are relevant sources from vectorDB & does not recall chat history.
Chat: Uses LLM general knowledge w/custom embeddings to produce output, uses rolling chat history.', required: true, type: 'object', content: { diff --git a/server/endpoints/chat.js b/server/endpoints/chat.js index d0a2923c5..79fc10132 100644 --- a/server/endpoints/chat.js +++ b/server/endpoints/chat.js @@ -1,7 +1,6 @@ const { v4: uuidv4 } = require("uuid"); const { reqBody, userFromSession, multiUserMode } = require("../utils/http"); const { Workspace } = require("../models/workspace"); -const { chatWithWorkspace } = require("../utils/chats"); const { validatedRequest } = require("../utils/middleware/validatedRequest"); const { WorkspaceChats } = require("../models/workspaceChats"); const { SystemSettings } = require("../models/systemSettings"); @@ -95,85 +94,6 @@ function chatEndpoints(app) { } } ); - - app.post( - "/workspace/:slug/chat", - [validatedRequest], - async (request, response) => { - try { - const user = await userFromSession(request, response); - const { slug } = request.params; - const { message, mode = "query" } = reqBody(request); - - const workspace = multiUserMode(response) - ? await Workspace.getWithUser(user, { slug }) - : await Workspace.get({ slug }); - - if (!workspace) { - response.sendStatus(400).end(); - return; - } - - if (multiUserMode(response) && user.role !== "admin") { - const limitMessagesSetting = await SystemSettings.get({ - label: "limit_user_messages", - }); - const limitMessages = limitMessagesSetting?.value === "true"; - - if (limitMessages) { - const messageLimitSetting = await SystemSettings.get({ - label: "message_limit", - }); - const systemLimit = Number(messageLimitSetting?.value); - - if (!!systemLimit) { - const currentChatCount = await WorkspaceChats.count({ - user_id: user.id, - createdAt: { - gte: new Date(new Date() - 24 * 60 * 60 * 1000), - }, - }); - - if (currentChatCount >= systemLimit) { - response.status(500).json({ - id: uuidv4(), - type: "abort", - textResponse: null, - sources: [], - close: true, - error: `You have met your maximum 24 hour chat quota of ${systemLimit} chats set by the instance administrators. Try again later.`, - }); - return; - } - } - } - } - - const result = await chatWithWorkspace(workspace, message, mode, user); - await Telemetry.sendTelemetry( - "sent_chat", - { - multiUserMode: multiUserMode(response), - LLMSelection: process.env.LLM_PROVIDER || "openai", - Embedder: process.env.EMBEDDING_ENGINE || "inherit", - VectorDbSelection: process.env.VECTOR_DB || "pinecone", - }, - user?.id - ); - response.status(200).json({ ...result }); - } catch (e) { - console.error(e); - response.status(500).json({ - id: uuidv4(), - type: "abort", - textResponse: null, - sources: [], - close: true, - error: e.message, - }); - } - } - ); } module.exports = { chatEndpoints }; diff --git a/server/swagger/openapi.json b/server/swagger/openapi.json index 184723ed7..7b675c44b 100644 --- a/server/swagger/openapi.json +++ b/server/swagger/openapi.json @@ -1598,7 +1598,7 @@ } }, "requestBody": { - "description": "prompt to send to the workspace and the type of conversation (query or chat).", + "description": "Send a prompt to the workspace and the type of conversation (query or chat).
Query: Will not use LLM unless there are relevant sources from vectorDB & does not recall chat history.
Chat: Uses LLM general knowledge w/custom embeddings to produce output, uses rolling chat history.", "required": true, "type": "object", "content": { diff --git a/server/utils/chats/index.js b/server/utils/chats/index.js index 7e9be6e5b..7fdb47344 100644 --- a/server/utils/chats/index.js +++ b/server/utils/chats/index.js @@ -91,6 +91,18 @@ async function chatWithWorkspace( const hasVectorizedSpace = await VectorDb.hasNamespace(workspace.slug); const embeddingsCount = await VectorDb.namespaceCount(workspace.slug); if (!hasVectorizedSpace || embeddingsCount === 0) { + if (chatMode === "query") { + return { + id: uuid, + type: "textResponse", + sources: [], + close: true, + error: null, + textResponse: + "There is no relevant information in this workspace to answer your query.", + }; + } + // If there are no embeddings - chat like a normal LLM chat interface. return await emptyEmbeddingChat({ uuid, @@ -131,6 +143,20 @@ async function chatWithWorkspace( }; } + // If in query mode and no sources are found, do not + // let the LLM try to hallucinate a response or use general knowledge + if (chatMode === "query" && sources.length === 0) { + return { + id: uuid, + type: "textResponse", + sources: [], + close: true, + error: null, + textResponse: + "There is no relevant information in this workspace to answer your query.", + }; + } + // Compress message to ensure prompt passes token limit with room for response // and build system messages based on inputs and history. const messages = await LLMConnector.compressMessages( diff --git a/server/utils/chats/stream.js b/server/utils/chats/stream.js index 73437eec5..11d4effd7 100644 --- a/server/utils/chats/stream.js +++ b/server/utils/chats/stream.js @@ -50,6 +50,19 @@ async function streamChatWithWorkspace( const hasVectorizedSpace = await VectorDb.hasNamespace(workspace.slug); const embeddingsCount = await VectorDb.namespaceCount(workspace.slug); if (!hasVectorizedSpace || embeddingsCount === 0) { + if (chatMode === "query") { + writeResponseChunk(response, { + id: uuid, + type: "textResponse", + textResponse: + "There is no relevant information in this workspace to answer your query.", + sources: [], + close: true, + error: null, + }); + return; + } + // If there are no embeddings - chat like a normal LLM chat interface. return await streamEmptyEmbeddingChat({ response, @@ -93,6 +106,21 @@ async function streamChatWithWorkspace( return; } + // If in query mode and no sources are found, do not + // let the LLM try to hallucinate a response or use general knowledge + if (chatMode === "query" && sources.length === 0) { + writeResponseChunk(response, { + id: uuid, + type: "textResponse", + textResponse: + "There is no relevant information in this workspace to answer your query.", + sources: [], + close: true, + error: null, + }); + return; + } + // Compress message to ensure prompt passes token limit with room for response // and build system messages based on inputs and history. 
const messages = await LLMConnector.compressMessages( From f5bb064dee7c9c7c28f4071051f7247dbc42a79f Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Tue, 16 Jan 2024 10:37:46 -0800 Subject: [PATCH 30/41] Implement streaming for workspace chats via API (#604) --- server/endpoints/api/workspace/index.js | 148 +++++++++++++++++++++++- server/endpoints/chat.js | 15 +++ server/swagger/openapi.json | 99 ++++++++++++++++ server/utils/chats/stream.js | 2 + 4 files changed, 263 insertions(+), 1 deletion(-) diff --git a/server/endpoints/api/workspace/index.js b/server/endpoints/api/workspace/index.js index ffead3adb..365e8b014 100644 --- a/server/endpoints/api/workspace/index.js +++ b/server/endpoints/api/workspace/index.js @@ -11,6 +11,11 @@ const { const { getVectorDbClass } = require("../../../utils/helpers"); const { multiUserMode, reqBody } = require("../../../utils/http"); const { validApiKey } = require("../../../utils/middleware/validApiKey"); +const { + streamChatWithWorkspace, + writeResponseChunk, + VALID_CHAT_MODE, +} = require("../../../utils/chats/stream"); function apiWorkspaceEndpoints(app) { if (!app) return; @@ -483,7 +488,28 @@ function apiWorkspaceEndpoints(app) { const workspace = await Workspace.get({ slug }); if (!workspace) { - response.sendStatus(400).end(); + response.status(400).json({ + id: uuidv4(), + type: "abort", + textResponse: null, + sources: [], + close: true, + error: `Workspace ${slug} is not a valid workspace.`, + }); + return; + } + + if (!message?.length || !VALID_CHAT_MODE.includes(mode)) { + response.status(400).json({ + id: uuidv4(), + type: "abort", + textResponse: null, + sources: [], + close: true, + error: !message?.length + ? "message parameter cannot be empty." + : `${mode} is not a valid mode.`, + }); return; } @@ -506,6 +532,126 @@ function apiWorkspaceEndpoints(app) { } } ); + + app.post( + "/v1/workspace/:slug/stream-chat", + [validApiKey], + async (request, response) => { + /* + #swagger.tags = ['Workspaces'] + #swagger.description = 'Execute a streamable chat with a workspace' + #swagger.requestBody = { + description: 'Send a prompt to the workspace and the type of conversation (query or chat).
Query: Will not use LLM unless there are relevant sources from vectorDB & does not recall chat history.
Chat: Uses LLM general knowledge w/custom embeddings to produce output, uses rolling chat history.', + required: true, + type: 'object', + content: { + "application/json": { + example: { + message: "What is AnythingLLM?", + mode: "query | chat" + } + } + } + } + #swagger.responses[200] = { + content: { + "text/event-stream": { + schema: { + type: 'array', + example: [ + { + id: 'uuid-123', + type: "abort | textResponseChunk", + textResponse: "First chunk", + sources: [], + close: false, + error: "null | text string of the failure mode." + }, + { + id: 'uuid-123', + type: "abort | textResponseChunk", + textResponse: "chunk two", + sources: [], + close: false, + error: "null | text string of the failure mode." + }, + { + id: 'uuid-123', + type: "abort | textResponseChunk", + textResponse: "final chunk of LLM output!", + sources: [{title: "anythingllm.txt", chunk: "This is a context chunk used in the answer of the prompt by the LLM. This will only return in the final chunk."}], + close: true, + error: "null | text string of the failure mode." + } + ] + } + } + } + } + #swagger.responses[403] = { + schema: { + "$ref": "#/definitions/InvalidAPIKey" + } + } + */ + try { + const { slug } = request.params; + const { message, mode = "query" } = reqBody(request); + const workspace = await Workspace.get({ slug }); + + if (!workspace) { + response.status(400).json({ + id: uuidv4(), + type: "abort", + textResponse: null, + sources: [], + close: true, + error: `Workspace ${slug} is not a valid workspace.`, + }); + return; + } + + if (!message?.length || !VALID_CHAT_MODE.includes(mode)) { + response.status(400).json({ + id: uuidv4(), + type: "abort", + textResponse: null, + sources: [], + close: true, + error: !message?.length + ? "Message is empty" + : `${mode} is not a valid mode.`, + }); + return; + } + + response.setHeader("Cache-Control", "no-cache"); + response.setHeader("Content-Type", "text/event-stream"); + response.setHeader("Access-Control-Allow-Origin", "*"); + response.setHeader("Connection", "keep-alive"); + response.flushHeaders(); + + await streamChatWithWorkspace(response, workspace, message, mode); + await Telemetry.sendTelemetry("sent_chat", { + LLMSelection: process.env.LLM_PROVIDER || "openai", + Embedder: process.env.EMBEDDING_ENGINE || "inherit", + VectorDbSelection: process.env.VECTOR_DB || "pinecone", + }); + response.end(); + } catch (e) { + console.error(e); + writeResponseChunk(response, { + id: uuidv4(), + type: "abort", + textResponse: null, + sources: [], + close: true, + error: e.message, + }); + response.end(); + } + } + ); } module.exports = { apiWorkspaceEndpoints }; diff --git a/server/endpoints/chat.js b/server/endpoints/chat.js index 79fc10132..adfec0ec3 100644 --- a/server/endpoints/chat.js +++ b/server/endpoints/chat.js @@ -8,6 +8,7 @@ const { Telemetry } = require("../models/telemetry"); const { streamChatWithWorkspace, writeResponseChunk, + VALID_CHAT_MODE, } = require("../utils/chats/stream"); function chatEndpoints(app) { @@ -31,6 +32,20 @@ function chatEndpoints(app) { return; } + if (!message?.length || !VALID_CHAT_MODE.includes(mode)) { + response.status(400).json({ + id: uuidv4(), + type: "abort", + textResponse: null, + sources: [], + close: true, + error: !message?.length + ? "Message is empty." 
+ : `${mode} is not a valid mode.`, + }); + return; + } + response.setHeader("Cache-Control", "no-cache"); response.setHeader("Content-Type", "text/event-stream"); response.setHeader("Access-Control-Allow-Origin", "*"); diff --git a/server/swagger/openapi.json b/server/swagger/openapi.json index 7b675c44b..e7b07484a 100644 --- a/server/swagger/openapi.json +++ b/server/swagger/openapi.json @@ -1612,6 +1612,105 @@ } } }, + "/v1/workspace/{slug}/stream-chat": { + "post": { + "tags": [ + "Workspaces" + ], + "description": "Execute a streamable chat with a workspace", + "parameters": [ + { + "name": "slug", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "Authorization", + "in": "header", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "text/event-stream": { + "schema": { + "type": "array", + "example": [ + { + "id": "uuid-123", + "type": "abort | textResponseChunk", + "textResponse": "First chunk", + "sources": [], + "close": false, + "error": "null | text string of the failure mode." + }, + { + "id": "uuid-123", + "type": "abort | textResponseChunk", + "textResponse": "chunk two", + "sources": [], + "close": false, + "error": "null | text string of the failure mode." + }, + { + "id": "uuid-123", + "type": "abort | textResponseChunk", + "textResponse": "final chunk of LLM output!", + "sources": [ + { + "title": "anythingllm.txt", + "chunk": "This is a context chunk used in the answer of the prompt by the LLM. This will only return in the final chunk." + } + ], + "close": true, + "error": "null | text string of the failure mode." + } + ] + } + } + }, + "description": "OK" + }, + "400": { + "description": "Bad Request" + }, + "403": { + "description": "Forbidden", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + } + } + } + }, + "requestBody": { + "description": "Send a prompt to the workspace and the type of conversation (query or chat).
Query: Will not use LLM unless there are relevant sources from vectorDB & does not recall chat history.
Chat: Uses LLM general knowledge w/custom embeddings to produce output, uses rolling chat history.", + "required": true, + "type": "object", + "content": { + "application/json": { + "example": { + "message": "What is AnythingLLM?", + "mode": "query | chat" + } + } + } + } + } + }, "/v1/system/env-dump": { "get": { "tags": [ diff --git a/server/utils/chats/stream.js b/server/utils/chats/stream.js index 11d4effd7..04bb72b90 100644 --- a/server/utils/chats/stream.js +++ b/server/utils/chats/stream.js @@ -8,6 +8,7 @@ const { chatPrompt, } = require("."); +const VALID_CHAT_MODE = ["chat", "query"]; function writeResponseChunk(response, data) { response.write(`data: ${JSON.stringify(data)}\n\n`); return; @@ -503,6 +504,7 @@ function handleStreamResponses(response, stream, responseProps) { } module.exports = { + VALID_CHAT_MODE, streamChatWithWorkspace, writeResponseChunk, }; From d0a3f1e3e16e0ff6d7450bec0e2d45a0748da95b Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Tue, 16 Jan 2024 13:41:01 -0800 Subject: [PATCH 31/41] Fix present diminsions on vectorDBs to be inferred for providers who require it (#605) --- .../EmbeddingEngines/azureOpenAi/index.js | 1 - .../utils/EmbeddingEngines/localAi/index.js | 4 --- server/utils/EmbeddingEngines/native/index.js | 1 - server/utils/EmbeddingEngines/openAi/index.js | 1 - .../utils/vectorDbProviders/milvus/index.js | 20 ++++++----- .../utils/vectorDbProviders/qdrant/index.js | 34 +++++++++++++------ 6 files changed, 35 insertions(+), 26 deletions(-) diff --git a/server/utils/EmbeddingEngines/azureOpenAi/index.js b/server/utils/EmbeddingEngines/azureOpenAi/index.js index 8cde1fc7c..e80b4b734 100644 --- a/server/utils/EmbeddingEngines/azureOpenAi/index.js +++ b/server/utils/EmbeddingEngines/azureOpenAi/index.js @@ -13,7 +13,6 @@ class AzureOpenAiEmbedder { new AzureKeyCredential(process.env.AZURE_OPENAI_KEY) ); this.openai = openai; - this.dimensions = 1536; // Limit of how many strings we can process in a single pass to stay with resource or network limits // https://learn.microsoft.com/en-us/azure/ai-services/openai/faq#i-am-trying-to-use-embeddings-and-received-the-error--invalidrequesterror--too-many-inputs--the-max-number-of-inputs-is-1---how-do-i-fix-this-:~:text=consisting%20of%20up%20to%2016%20inputs%20per%20API%20request diff --git a/server/utils/EmbeddingEngines/localAi/index.js b/server/utils/EmbeddingEngines/localAi/index.js index 6f9d721b9..1480755d7 100644 --- a/server/utils/EmbeddingEngines/localAi/index.js +++ b/server/utils/EmbeddingEngines/localAi/index.js @@ -16,10 +16,6 @@ class LocalAiEmbedder { : {}), }); this.openai = new OpenAIApi(config); - // We don't know this for user's set model so for vectorDB integrations that requires dimensionality - // in schema, we will throw an error. - // Applies to QDrant and Milvus. 
- this.dimensions = null; // Limit of how many strings we can process in a single pass to stay with resource or network limits this.maxConcurrentChunks = 50; diff --git a/server/utils/EmbeddingEngines/native/index.js b/server/utils/EmbeddingEngines/native/index.js index 789e51fe9..fc933e1b8 100644 --- a/server/utils/EmbeddingEngines/native/index.js +++ b/server/utils/EmbeddingEngines/native/index.js @@ -13,7 +13,6 @@ class NativeEmbedder { : path.resolve(__dirname, `../../../storage/models`) ); this.modelPath = path.resolve(this.cacheDir, "Xenova", "all-MiniLM-L6-v2"); - this.dimensions = 384; // Limit of how many strings we can process in a single pass to stay with resource or network limits this.maxConcurrentChunks = 25; diff --git a/server/utils/EmbeddingEngines/openAi/index.js b/server/utils/EmbeddingEngines/openAi/index.js index 31f556e89..105be9d73 100644 --- a/server/utils/EmbeddingEngines/openAi/index.js +++ b/server/utils/EmbeddingEngines/openAi/index.js @@ -9,7 +9,6 @@ class OpenAiEmbedder { }); const openai = new OpenAIApi(config); this.openai = openai; - this.dimensions = 1536; // Limit of how many strings we can process in a single pass to stay with resource or network limits this.maxConcurrentChunks = 500; diff --git a/server/utils/vectorDbProviders/milvus/index.js b/server/utils/vectorDbProviders/milvus/index.js index a9104784b..cc934a9a2 100644 --- a/server/utils/vectorDbProviders/milvus/index.js +++ b/server/utils/vectorDbProviders/milvus/index.js @@ -81,13 +81,15 @@ const Milvus = { await client.dropCollection({ collection_name: namespace }); return true; }, - getOrCreateCollection: async function (client, namespace) { + // Milvus requires a dimension aspect for collection creation + // we pass this in from the first chunk to infer the dimensions like other + // providers do. + getOrCreateCollection: async function (client, namespace, dimensions = null) { const isExists = await this.namespaceExists(client, namespace); if (!isExists) { - const embedder = getEmbeddingEngineSelection(); - if (!embedder.dimensions) + if (!dimensions) throw new Error( - `Your embedder selection has unknown dimensions output. It should be defined when using ${this.name}. Open an issue on Github for support.` + `Milvus:getOrCreateCollection Unable to infer vector dimension from input. Open an issue on Github for support.` ); await client.createCollection({ @@ -104,7 +106,7 @@ const Milvus = { name: "vector", description: "vector", data_type: DataType.FloatVector, - dim: embedder.dimensions, + dim: dimensions, }, { name: "metadata", @@ -131,6 +133,7 @@ const Milvus = { ) { const { DocumentVectors } = require("../../../models/vectors"); try { + let vectorDimension = null; const { pageContent, docId, ...metadata } = documentData; if (!pageContent || pageContent.length == 0) return false; @@ -138,11 +141,11 @@ const Milvus = { const cacheResult = await cachedVectorInformation(fullFilePath); if (cacheResult.exists) { const { client } = await this.connect(); - await this.getOrCreateCollection(client, namespace); - const { chunks } = cacheResult; const documentVectors = []; + vectorDimension = chunks[0][0].values.length || null; + await this.getOrCreateCollection(client, namespace, vectorDimension); for (const chunk of chunks) { // Before sending to Pinecone and saving the records to our db // we need to assign the id of each chunk that is stored in the cached file. 
@@ -182,6 +185,7 @@ const Milvus = { if (!!vectorValues && vectorValues.length > 0) { for (const [i, vector] of vectorValues.entries()) { + if (!vectorDimension) vectorDimension = vector.length; const vectorRecord = { id: uuidv4(), values: vector, @@ -202,7 +206,7 @@ const Milvus = { if (vectors.length > 0) { const chunks = []; const { client } = await this.connect(); - await this.getOrCreateCollection(client, namespace); + await this.getOrCreateCollection(client, namespace, vectorDimension); console.log("Inserting vectorized chunks into Milvus."); for (const chunk of toChunks(vectors, 100)) { diff --git a/server/utils/vectorDbProviders/qdrant/index.js b/server/utils/vectorDbProviders/qdrant/index.js index ddc3408da..2783cde93 100644 --- a/server/utils/vectorDbProviders/qdrant/index.js +++ b/server/utils/vectorDbProviders/qdrant/index.js @@ -108,19 +108,20 @@ const QDrant = { await client.deleteCollection(namespace); return true; }, - getOrCreateCollection: async function (client, namespace) { + // QDrant requires a dimension aspect for collection creation + // we pass this in from the first chunk to infer the dimensions like other + // providers do. + getOrCreateCollection: async function (client, namespace, dimensions = null) { if (await this.namespaceExists(client, namespace)) { return await client.getCollection(namespace); } - - const embedder = getEmbeddingEngineSelection(); - if (!embedder.dimensions) + if (!dimensions) throw new Error( - `Your embedder selection has unknown dimensions output. It should be defined when using ${this.name}. Open an issue on Github for support.` + `Qdrant:getOrCreateCollection Unable to infer vector dimension from input. Open an issue on Github for support.` ); await client.createCollection(namespace, { vectors: { - size: embedder.dimensions, + size: dimensions, distance: "Cosine", }, }); @@ -133,6 +134,7 @@ const QDrant = { ) { const { DocumentVectors } = require("../../../models/vectors"); try { + let vectorDimension = null; const { pageContent, docId, ...metadata } = documentData; if (!pageContent || pageContent.length == 0) return false; @@ -140,15 +142,20 @@ const QDrant = { const cacheResult = await cachedVectorInformation(fullFilePath); if (cacheResult.exists) { const { client } = await this.connect(); - const collection = await this.getOrCreateCollection(client, namespace); + const { chunks } = cacheResult; + const documentVectors = []; + vectorDimension = chunks[0][0].vector.length || null; + + const collection = await this.getOrCreateCollection( + client, + namespace, + vectorDimension + ); if (!collection) throw new Error("Failed to create new QDrant collection!", { namespace, }); - const { chunks } = cacheResult; - const documentVectors = []; - for (const chunk of chunks) { const submission = { ids: [], @@ -210,6 +217,7 @@ const QDrant = { if (!!vectorValues && vectorValues.length > 0) { for (const [i, vector] of vectorValues.entries()) { + if (!vectorDimension) vectorDimension = vector.length; const vectorRecord = { id: uuidv4(), vector: vector, @@ -233,7 +241,11 @@ const QDrant = { } const { client } = await this.connect(); - const collection = await this.getOrCreateCollection(client, namespace); + const collection = await this.getOrCreateCollection( + client, + namespace, + vectorDimension + ); if (!collection) throw new Error("Failed to create new QDrant collection!", { namespace, From 8b11288764ac9ce407283717daa819cbab3caa57 Mon Sep 17 00:00:00 2001 From: Sean Hatfield Date: Tue, 16 Jan 2024 13:43:32 -0800 Subject: [PATCH 32/41] 
=?UTF-8?q?truncate=20title=20to=20shorter=20length?= =?UTF-8?q?=20so=20x=20button=20is=20not=20hidden=20for=20longer=E2=80=A6?= =?UTF-8?q?=20(#603)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * truncate title to shorter length so x button is not hidden for longer title names in the citation modal * absolutely position x button on citation modal --- .../ChatHistory/Citation/index.jsx | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/Citation/index.jsx b/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/Citation/index.jsx index 1146639ae..c4bda294c 100644 --- a/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/Citation/index.jsx +++ b/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/Citation/index.jsx @@ -119,21 +119,19 @@ function CitationDetailModal({ source, onClose }) { className="bg-transparent outline-none fixed top-0 left-0 w-full h-full flex items-center justify-center z-10" >
-
-
-

- {truncate(title, 52)} -

- {references > 1 && ( -

- Referenced {references} times. -

- )} -
+
+

+ {truncate(title, 45)} +

+ {references > 1 && ( +

+ Referenced {references} times. +

+ )} @@ -159,6 +157,7 @@ function CitationDetailModal({ source, onClose }) { ); } + function truncateMiddle(title) { if (title.length <= 18) return title; From e973c1edbf42260f9643764e16fac864c70efbc4 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Tue, 16 Jan 2024 14:15:46 -0800 Subject: [PATCH 33/41] Preload onboarding (#606) * no lazy onboarding * no lazy onboarding steps * Do not lazy load onboarding to prevent lazy-load white flash --- frontend/src/App.jsx | 3 +- .../src/pages/OnboardingFlow/Steps/index.jsx | 30 ++++++++++++------- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index fa74d434e..7c14be4d9 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -8,6 +8,8 @@ import PrivateRoute, { import { ToastContainer } from "react-toastify"; import "react-toastify/dist/ReactToastify.css"; import Login from "@/pages/Login"; +import OnboardingFlow from "@/pages/OnboardingFlow"; + import { PfpProvider } from "./PfpContext"; import { LogoProvider } from "./LogoContext"; @@ -42,7 +44,6 @@ const DataConnectors = lazy( const DataConnectorSetup = lazy( () => import("@/pages/GeneralSettings/DataConnectors/Connectors") ); -const OnboardingFlow = lazy(() => import("@/pages/OnboardingFlow")); export default function App() { return ( diff --git a/frontend/src/pages/OnboardingFlow/Steps/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/index.jsx index 3f218d531..957d94a4a 100644 --- a/frontend/src/pages/OnboardingFlow/Steps/index.jsx +++ b/frontend/src/pages/OnboardingFlow/Steps/index.jsx @@ -1,16 +1,26 @@ import { ArrowLeft, ArrowRight } from "@phosphor-icons/react"; -import { lazy, useState } from "react"; +import { useState } from "react"; import { isMobile } from "react-device-detect"; +import Home from "./Home"; +import LLMPreference from "./LLMPreference"; +import EmbeddingPreference from "./EmbeddingPreference"; +import VectorDatabaseConnection from "./VectorDatabaseConnection"; +import CustomLogo from "./CustomLogo"; +import UserSetup from "./UserSetup"; +import DataHandling from "./DataHandling"; +import Survey from "./Survey"; +import CreateWorkspace from "./CreateWorkspace"; + const OnboardingSteps = { - home: lazy(() => import("./Home")), - "llm-preference": lazy(() => import("./LLMPreference")), - "embedding-preference": lazy(() => import("./EmbeddingPreference")), - "vector-database": lazy(() => import("./VectorDatabaseConnection")), - "custom-logo": lazy(() => import("./CustomLogo")), - "user-setup": lazy(() => import("./UserSetup")), - "data-handling": lazy(() => import("./DataHandling")), - survey: lazy(() => import("./Survey")), - "create-workspace": lazy(() => import("./CreateWorkspace")), + home: Home, + "llm-preference": LLMPreference, + "embedding-preference": EmbeddingPreference, + "vector-database": VectorDatabaseConnection, + "custom-logo": CustomLogo, + "user-setup": UserSetup, + "data-handling": DataHandling, + survey: Survey, + "create-workspace": CreateWorkspace, }; export default OnboardingSteps; From c61cbd1502e900ebb421bc532647d548f41162d5 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Tue, 16 Jan 2024 14:58:49 -0800 Subject: [PATCH 34/41] Add support for fetching single document in documents folder (#607) --- .../ChatHistory/Citation/index.jsx | 1 - server/endpoints/api/document/index.js | 60 ++++++++++++++- server/swagger/openapi.json | 75 +++++++++++++++++++ server/utils/files/index.js | 38 ++++++++++ 4 files changed, 172 insertions(+), 2 deletions(-) diff --git 
a/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/Citation/index.jsx b/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/Citation/index.jsx index c4bda294c..9af36fc5a 100644 --- a/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/Citation/index.jsx +++ b/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/Citation/index.jsx @@ -157,7 +157,6 @@ function CitationDetailModal({ source, onClose }) { ); } - function truncateMiddle(title) { if (title.length <= 18) return title; diff --git a/server/endpoints/api/document/index.js b/server/endpoints/api/document/index.js index a813e2df6..f1282e7c2 100644 --- a/server/endpoints/api/document/index.js +++ b/server/endpoints/api/document/index.js @@ -6,7 +6,10 @@ const { acceptedFileTypes, processDocument, } = require("../../../utils/files/documentProcessor"); -const { viewLocalFiles } = require("../../../utils/files"); +const { + viewLocalFiles, + findDocumentInDocuments, +} = require("../../../utils/files"); const { handleUploads } = setupMulter(); function apiDocumentEndpoints(app) { @@ -133,6 +136,61 @@ function apiDocumentEndpoints(app) { } }); + app.get("/v1/document/:docName", [validApiKey], async (request, response) => { + /* + #swagger.tags = ['Documents'] + #swagger.description = 'Get a single document by its unique AnythingLLM document name' + #swagger.parameters['docName'] = { + in: 'path', + description: 'Unique document name to find (name in /documents)', + required: true, + type: 'string' + } + #swagger.responses[200] = { + content: { + "application/json": { + schema: { + type: 'object', + example: { + "localFiles": { + "name": "documents", + "type": "folder", + items: [ + { + "name": "my-stored-document.txt-uuid1234.json", + "type": "file", + "id": "bb07c334-4dab-4419-9462-9d00065a49a1", + "url": "file://my-stored-document.txt", + "title": "my-stored-document.txt", + "cached": false + }, + ] + } + } + } + } + } + } + #swagger.responses[403] = { + schema: { + "$ref": "#/definitions/InvalidAPIKey" + } + } + */ + try { + const { docName } = request.params; + const document = await findDocumentInDocuments(docName); + if (!document) { + response.sendStatus(404).end(); + return; + } + response.status(200).json({ document }); + } catch (e) { + console.log(e.message, e); + response.sendStatus(500).end(); + } + }); + app.get( "/v1/document/accepted-file-types", [validApiKey], diff --git a/server/swagger/openapi.json b/server/swagger/openapi.json index e7b07484a..7d91579fd 100644 --- a/server/swagger/openapi.json +++ b/server/swagger/openapi.json @@ -953,6 +953,81 @@ } } }, + "/v1/document/{docName}": { + "get": { + "tags": [ + "Documents" + ], + "description": "Get a single document by its unique AnythingLLM document name", + "parameters": [ + { + "name": "docName", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Unique document name to find (name in /documents)" + }, + { + "name": "Authorization", + "in": "header", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "type": "object", + "example": { + "localFiles": { + "name": "documents", + "type": "folder", + "items": [ + { + "name": "my-stored-document.txt-uuid1234.json", + "type": "file", + "id": "bb07c334-4dab-4419-9462-9d00065a49a1", + "url": "file://my-stored-document.txt", + "title": "my-stored-document.txt", + "cached": false + } + ] + } + } + } + } + } + }, + "403": { + "description": 
"Forbidden", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + } + } + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, "/v1/document/accepted-file-types": { "get": { "tags": [ diff --git a/server/utils/files/index.js b/server/utils/files/index.js index 2ff1d60cc..e713a318a 100644 --- a/server/utils/files/index.js +++ b/server/utils/files/index.js @@ -157,11 +157,49 @@ async function purgeVectorCache(filename = null) { return; } +// Search for a specific document by its unique name in the entire `documents` +// folder via iteration of all folders and checking if the expected file exists. +async function findDocumentInDocuments(documentName = null) { + if (!documentName) return null; + const documentsFolder = + process.env.NODE_ENV === "development" + ? path.resolve(__dirname, `../../storage/documents`) + : path.resolve(process.env.STORAGE_DIR, `documents`); + + for (const folder of fs.readdirSync(documentsFolder)) { + const isFolder = fs + .lstatSync(path.join(documentsFolder, folder)) + .isDirectory(); + if (!isFolder) continue; + + const targetFilename = normalizePath(documentName); + const targetFileLocation = path.join( + documentsFolder, + folder, + targetFilename + ); + if (!fs.existsSync(targetFileLocation)) continue; + + const fileData = fs.readFileSync(targetFileLocation, "utf8"); + const cachefilename = `${folder}/${targetFilename}`; + const { pageContent, ...metadata } = JSON.parse(fileData); + return { + name: targetFilename, + type: "file", + ...metadata, + cached: await cachedVectorInformation(cachefilename, true), + }; + } + + return null; +} + function normalizePath(filepath = "") { return path.normalize(filepath).replace(/^(\.\.(\/|\\|$))+/, ""); } module.exports = { + findDocumentInDocuments, cachedVectorInformation, viewLocalFiles, purgeSourceDocument, From b35feede879c543e2d6cb58c89f973b29073ecc0 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Tue, 16 Jan 2024 16:04:22 -0800 Subject: [PATCH 35/41] 570 document api return object (#608) * Add support for fetching single document in documents folder * Add document object to upload + support link scraping via API * hotfixes for documentation * update api docs --- collector/index.js | 16 ++- collector/processLink/convert/generic.js | 13 +- .../processSingleFile/convert/asAudio.js | 14 +- collector/processSingleFile/convert/asDocx.js | 13 +- collector/processSingleFile/convert/asMbox.js | 15 ++- .../processSingleFile/convert/asOfficeMime.js | 13 +- collector/processSingleFile/convert/asPDF.js | 13 +- collector/processSingleFile/convert/asTxt.js | 13 +- collector/processSingleFile/index.js | 4 + collector/utils/files/index.js | 19 ++- server/endpoints/api/document/index.js | 121 +++++++++++++++++- server/endpoints/api/workspace/index.js | 4 +- server/swagger/openapi.json | 105 ++++++++++++++- server/utils/files/documentProcessor.js | 4 +- 14 files changed, 324 insertions(+), 43 deletions(-) diff --git a/collector/index.js b/collector/index.js index 5070ae72f..062d78959 100644 --- a/collector/index.js +++ b/collector/index.js @@ -29,14 +29,21 @@ app.post("/process", async function (request, response) { const targetFilename = path .normalize(filename) .replace(/^(\.\.(\/|\\|$))+/, ""); - const { success, reason } = await processSingleFile(targetFilename); - response.status(200).json({ filename: targetFilename, 
success, reason }); + const { + success, + reason, + documents = [], + } = await processSingleFile(targetFilename); + response + .status(200) + .json({ filename: targetFilename, success, reason, documents }); } catch (e) { console.error(e); response.status(200).json({ filename: filename, success: false, reason: "A processing error occurred.", + documents: [], }); } return; @@ -45,14 +52,15 @@ app.post("/process", async function (request, response) { app.post("/process-link", async function (request, response) { const { link } = reqBody(request); try { - const { success, reason } = await processLink(link); - response.status(200).json({ url: link, success, reason }); + const { success, reason, documents = [] } = await processLink(link); + response.status(200).json({ url: link, success, reason, documents }); } catch (e) { console.error(e); response.status(200).json({ url: link, success: false, reason: "A processing error occurred.", + documents: [], }); } return; diff --git a/collector/processLink/convert/generic.js b/collector/processLink/convert/generic.js index f42dcd171..c6431d733 100644 --- a/collector/processLink/convert/generic.js +++ b/collector/processLink/convert/generic.js @@ -12,7 +12,11 @@ async function scrapeGenericUrl(link) { if (!content.length) { console.error(`Resulting URL content was empty at ${link}.`); - return { success: false, reason: `No URL content found at ${link}.` }; + return { + success: false, + reason: `No URL content found at ${link}.`, + documents: [], + }; } const url = new URL(link); @@ -32,9 +36,12 @@ async function scrapeGenericUrl(link) { token_count_estimate: tokenizeString(content).length, }; - writeToServerDocuments(data, `url-${slugify(filename)}-${data.id}`); + const document = writeToServerDocuments( + data, + `url-${slugify(filename)}-${data.id}` + ); console.log(`[SUCCESS]: URL ${link} converted & ready for embedding.\n`); - return { success: true, reason: null }; + return { success: true, reason: null, documents: [document] }; } async function getPageContent(link) { diff --git a/collector/processSingleFile/convert/asAudio.js b/collector/processSingleFile/convert/asAudio.js index a15207fba..7688d7b85 100644 --- a/collector/processSingleFile/convert/asAudio.js +++ b/collector/processSingleFile/convert/asAudio.js @@ -31,6 +31,7 @@ async function asAudio({ fullFilePath = "", filename = "" }) { return { success: false, reason: `Failed to parse content from ${filename}.`, + documents: [], }; } @@ -43,7 +44,11 @@ async function asAudio({ fullFilePath = "", filename = "" }) { if (!content.length) { console.error(`Resulting text content was empty for ${filename}.`); trashFile(fullFilePath); - return { success: false, reason: `No text content found in ${filename}.` }; + return { + success: false, + reason: `No text content found in ${filename}.`, + documents: [], + }; } const data = { @@ -60,12 +65,15 @@ async function asAudio({ fullFilePath = "", filename = "" }) { token_count_estimate: tokenizeString(content).length, }; - writeToServerDocuments(data, `${slugify(filename)}-${data.id}`); + const document = writeToServerDocuments( + data, + `${slugify(filename)}-${data.id}` + ); trashFile(fullFilePath); console.log( `[SUCCESS]: ${filename} transcribed, converted & ready for embedding.\n` ); - return { success: true, reason: null }; + return { success: true, reason: null, documents: [document] }; } async function convertToWavAudioData(sourcePath) { diff --git a/collector/processSingleFile/convert/asDocx.js b/collector/processSingleFile/convert/asDocx.js 
index 7a64a042d..b4fe7d2c9 100644 --- a/collector/processSingleFile/convert/asDocx.js +++ b/collector/processSingleFile/convert/asDocx.js @@ -24,7 +24,11 @@ async function asDocX({ fullFilePath = "", filename = "" }) { if (!pageContent.length) { console.error(`Resulting text content was empty for ${filename}.`); trashFile(fullFilePath); - return { success: false, reason: `No text content found in ${filename}.` }; + return { + success: false, + reason: `No text content found in ${filename}.`, + documents: [], + }; } const content = pageContent.join(""); @@ -42,10 +46,13 @@ async function asDocX({ fullFilePath = "", filename = "" }) { token_count_estimate: tokenizeString(content).length, }; - writeToServerDocuments(data, `${slugify(filename)}-${data.id}`); + const document = writeToServerDocuments( + data, + `${slugify(filename)}-${data.id}` + ); trashFile(fullFilePath); console.log(`[SUCCESS]: ${filename} converted & ready for embedding.\n`); - return { success: true, reason: null }; + return { success: true, reason: null, documents: [document] }; } module.exports = asDocX; diff --git a/collector/processSingleFile/convert/asMbox.js b/collector/processSingleFile/convert/asMbox.js index 30883f21b..f62f6b2ba 100644 --- a/collector/processSingleFile/convert/asMbox.js +++ b/collector/processSingleFile/convert/asMbox.js @@ -22,10 +22,15 @@ async function asMbox({ fullFilePath = "", filename = "" }) { if (!mails.length) { console.error(`Resulting mail items was empty for ${filename}.`); trashFile(fullFilePath); - return { success: false, reason: `No mail items found in ${filename}.` }; + return { + success: false, + reason: `No mail items found in ${filename}.`, + documents: [], + }; } let item = 1; + const documents = []; for (const mail of mails) { if (!mail.hasOwnProperty("text")) continue; @@ -52,14 +57,18 @@ async function asMbox({ fullFilePath = "", filename = "" }) { }; item++; - writeToServerDocuments(data, `${slugify(filename)}-${data.id}-msg-${item}`); + const document = writeToServerDocuments( + data, + `${slugify(filename)}-${data.id}-msg-${item}` + ); + documents.push(document); } trashFile(fullFilePath); console.log( `[SUCCESS]: ${filename} messages converted & ready for embedding.\n` ); - return { success: true, reason: null }; + return { success: true, reason: null, documents }; } module.exports = asMbox; diff --git a/collector/processSingleFile/convert/asOfficeMime.js b/collector/processSingleFile/convert/asOfficeMime.js index a6eb0351a..45b316610 100644 --- a/collector/processSingleFile/convert/asOfficeMime.js +++ b/collector/processSingleFile/convert/asOfficeMime.js @@ -20,7 +20,11 @@ async function asOfficeMime({ fullFilePath = "", filename = "" }) { if (!content.length) { console.error(`Resulting text content was empty for ${filename}.`); trashFile(fullFilePath); - return { success: false, reason: `No text content found in ${filename}.` }; + return { + success: false, + reason: `No text content found in ${filename}.`, + documents: [], + }; } const data = { @@ -37,10 +41,13 @@ async function asOfficeMime({ fullFilePath = "", filename = "" }) { token_count_estimate: tokenizeString(content).length, }; - writeToServerDocuments(data, `${slugify(filename)}-${data.id}`); + const document = writeToServerDocuments( + data, + `${slugify(filename)}-${data.id}` + ); trashFile(fullFilePath); console.log(`[SUCCESS]: ${filename} converted & ready for embedding.\n`); - return { success: true, reason: null }; + return { success: true, reason: null, documents: [document] }; } module.exports = 
asOfficeMime; diff --git a/collector/processSingleFile/convert/asPDF.js b/collector/processSingleFile/convert/asPDF.js index f6d869d5c..b89b97411 100644 --- a/collector/processSingleFile/convert/asPDF.js +++ b/collector/processSingleFile/convert/asPDF.js @@ -29,7 +29,11 @@ async function asPDF({ fullFilePath = "", filename = "" }) { if (!pageContent.length) { console.error(`Resulting text content was empty for ${filename}.`); trashFile(fullFilePath); - return { success: false, reason: `No text content found in ${filename}.` }; + return { + success: false, + reason: `No text content found in ${filename}.`, + documents: [], + }; } const content = pageContent.join(""); @@ -47,10 +51,13 @@ async function asPDF({ fullFilePath = "", filename = "" }) { token_count_estimate: tokenizeString(content).length, }; - writeToServerDocuments(data, `${slugify(filename)}-${data.id}`); + const document = writeToServerDocuments( + data, + `${slugify(filename)}-${data.id}` + ); trashFile(fullFilePath); console.log(`[SUCCESS]: ${filename} converted & ready for embedding.\n`); - return { success: true, reason: null }; + return { success: true, reason: null, documents: [document] }; } module.exports = asPDF; diff --git a/collector/processSingleFile/convert/asTxt.js b/collector/processSingleFile/convert/asTxt.js index ad35e5476..cf7260d4b 100644 --- a/collector/processSingleFile/convert/asTxt.js +++ b/collector/processSingleFile/convert/asTxt.js @@ -19,7 +19,11 @@ async function asTxt({ fullFilePath = "", filename = "" }) { if (!content?.length) { console.error(`Resulting text content was empty for ${filename}.`); trashFile(fullFilePath); - return { success: false, reason: `No text content found in ${filename}.` }; + return { + success: false, + reason: `No text content found in ${filename}.`, + documents: [], + }; } console.log(`-- Working ${filename} --`); @@ -37,10 +41,13 @@ async function asTxt({ fullFilePath = "", filename = "" }) { token_count_estimate: tokenizeString(content).length, }; - writeToServerDocuments(data, `${slugify(filename)}-${data.id}`); + const document = writeToServerDocuments( + data, + `${slugify(filename)}-${data.id}` + ); trashFile(fullFilePath); console.log(`[SUCCESS]: ${filename} converted & ready for embedding.\n`); - return { success: true, reason: null }; + return { success: true, reason: null, documents: [document] }; } module.exports = asTxt; diff --git a/collector/processSingleFile/index.js b/collector/processSingleFile/index.js index 37c9fd5c5..9efd3a70f 100644 --- a/collector/processSingleFile/index.js +++ b/collector/processSingleFile/index.js @@ -13,11 +13,13 @@ async function processSingleFile(targetFilename) { return { success: false, reason: "Filename is a reserved filename and cannot be processed.", + documents: [], }; if (!fs.existsSync(fullFilePath)) return { success: false, reason: "File does not exist in upload directory.", + documents: [], }; const fileExtension = path.extname(fullFilePath).toLowerCase(); @@ -25,6 +27,7 @@ async function processSingleFile(targetFilename) { return { success: false, reason: `No file extension found. 
This file cannot be processed.`, + documents: [], }; } @@ -33,6 +36,7 @@ async function processSingleFile(targetFilename) { return { success: false, reason: `File extension ${fileExtension} not supported for parsing.`, + documents: [], }; } diff --git a/collector/utils/files/index.js b/collector/utils/files/index.js index 915c4ac10..caf33c888 100644 --- a/collector/utils/files/index.js +++ b/collector/utils/files/index.js @@ -38,14 +38,19 @@ function writeToServerDocuments( ); if (!fs.existsSync(destination)) fs.mkdirSync(destination, { recursive: true }); - const destinationFilePath = path.resolve(destination, filename); + const destinationFilePath = path.resolve(destination, filename) + ".json"; - fs.writeFileSync( - destinationFilePath + ".json", - JSON.stringify(data, null, 4), - { encoding: "utf-8" } - ); - return; + fs.writeFileSync(destinationFilePath, JSON.stringify(data, null, 4), { + encoding: "utf-8", + }); + + return { + ...data, + // relative location string that can be passed into the /update-embeddings api + // that will work since we know the location exists and since we only allow + // 1-level deep folders this will always work. This still works for integrations like GitHub and YouTube. + location: destinationFilePath.split("/").slice(-2).join("/"), + }; } // When required we can wipe the entire collector hotdir and tmp storage in case diff --git a/server/endpoints/api/document/index.js b/server/endpoints/api/document/index.js index f1282e7c2..817043526 100644 --- a/server/endpoints/api/document/index.js +++ b/server/endpoints/api/document/index.js @@ -5,11 +5,13 @@ const { checkProcessorAlive, acceptedFileTypes, processDocument, + processLink, } = require("../../../utils/files/documentProcessor"); const { viewLocalFiles, findDocumentInDocuments, } = require("../../../utils/files"); +const { reqBody } = require("../../../utils/http"); const { handleUploads } = setupMulter(); function apiDocumentEndpoints(app) { @@ -23,7 +25,6 @@ function apiDocumentEndpoints(app) { /* #swagger.tags = ['Documents'] #swagger.description = 'Upload a new file to AnythingLLM to be parsed and prepared for embedding.' - #swagger.requestBody = { description: 'File to be uploaded.', required: true, @@ -50,6 +51,21 @@ function apiDocumentEndpoints(app) { example: { success: true, error: null, + documents: [ + { + "location": "custom-documents/anythingllm.txt-6e8be64c-c162-4b43-9997-b068c0071e8b.json", + "name": "anythingllm.txt-6e8be64c-c162-4b43-9997-b068c0071e8b.json", + "url": "file:///Users/tim/Documents/anything-llm/collector/hotdir/anythingllm.txt", + "title": "anythingllm.txt", + "docAuthor": "Unknown", + "description": "Unknown", + "docSource": "a text file uploaded by the user.", + "chunkSource": "anythingllm.txt", + "published": "1/16/2024, 3:07:00 PM", + "wordCount": 93, + "token_count_estimate": 115, + } + ] } } } @@ -75,16 +91,113 @@ function apiDocumentEndpoints(app) { .end(); } - const { success, reason } = await processDocument(originalname); + const { success, reason, documents } = + await processDocument(originalname); if (!success) { - response.status(500).json({ success: false, error: reason }).end(); + response + .status(500) + .json({ success: false, error: reason, documents }) + .end(); + return; } console.log( `Document ${originalname} uploaded processed and successfully. 
It is now available in documents.` ); await Telemetry.sendTelemetry("document_uploaded"); - response.status(200).json({ success: true, error: null }); + response.status(200).json({ success: true, error: null, documents }); + } catch (e) { + console.log(e.message, e); + response.sendStatus(500).end(); + } + } + ); + + app.post( + "/v1/document/upload-link", + [validApiKey], + async (request, response) => { + /* + #swagger.tags = ['Documents'] + #swagger.description = 'Upload a valid URL for AnythingLLM to scrape and prepare for embedding.' + #swagger.requestBody = { + description: 'Link of web address to be scraped.', + required: true, + type: 'file', + content: { + "application/json": { + schema: { + type: 'object', + example: { + "link": "https://useanything.com" + } + } + } + } + } + #swagger.responses[200] = { + content: { + "application/json": { + schema: { + type: 'object', + example: { + success: true, + error: null, + documents: [ + { + "id": "c530dbe6-bff1-4b9e-b87f-710d539d20bc", + "url": "file://useanything_com.html", + "title": "useanything_com.html", + "docAuthor": "no author found", + "description": "No description found.", + "docSource": "URL link uploaded by the user.", + "chunkSource": "https:useanything.com.html", + "published": "1/16/2024, 3:46:33 PM", + "wordCount": 252, + "pageContent": "AnythingLLM is the best....", + "token_count_estimate": 447, + "location": "custom-documents/url-useanything_com-c530dbe6-bff1-4b9e-b87f-710d539d20bc.json" + } + ] + } + } + } + } + } + #swagger.responses[403] = { + schema: { + "$ref": "#/definitions/InvalidAPIKey" + } + } + */ + try { + const { link } = reqBody(request); + const processingOnline = await checkProcessorAlive(); + + if (!processingOnline) { + response + .status(500) + .json({ + success: false, + error: `Document processing API is not online. Link ${link} will not be processed automatically.`, + }) + .end(); + } + + const { success, reason, documents } = await processLink(link); + if (!success) { + response + .status(500) + .json({ success: false, error: reason, documents }) + .end(); + return; + } + + console.log( + `Link ${link} uploaded processed and successfully. 
It is now available in documents.` + ); + await Telemetry.sendTelemetry("document_uploaded"); + response.status(200).json({ success: true, error: null, documents }); } catch (e) { console.log(e.message, e); response.sendStatus(500).end(); diff --git a/server/endpoints/api/workspace/index.js b/server/endpoints/api/workspace/index.js index 365e8b014..c1642ce4a 100644 --- a/server/endpoints/api/workspace/index.js +++ b/server/endpoints/api/workspace/index.js @@ -381,8 +381,8 @@ function apiWorkspaceEndpoints(app) { content: { "application/json": { example: { - adds: [], - deletes: ["custom-documents/anythingllm-hash.json"] + adds: ["custom-documents/my-pdf.pdf-hash.json"], + deletes: ["custom-documents/anythingllm.txt-hash.json"] } } } diff --git a/server/swagger/openapi.json b/server/swagger/openapi.json index 7d91579fd..c7532059d 100644 --- a/server/swagger/openapi.json +++ b/server/swagger/openapi.json @@ -845,7 +845,22 @@ "type": "object", "example": { "success": true, - "error": null + "error": null, + "documents": [ + { + "location": "custom-documents/anythingllm.txt-6e8be64c-c162-4b43-9997-b068c0071e8b.json", + "name": "anythingllm.txt-6e8be64c-c162-4b43-9997-b068c0071e8b.json", + "url": "file://Users/tim/Documents/anything-llm/collector/hotdir/anythingllm.txt", + "title": "anythingllm.txt", + "docAuthor": "Unknown", + "description": "Unknown", + "docSource": "a text file uploaded by the user.", + "chunkSource": "anythingllm.txt", + "published": "1/16/2024, 3:07:00 PM", + "wordCount": 93, + "token_count_estimate": 115 + } + ] } } } @@ -890,6 +905,88 @@ } } }, + "/v1/document/upload-link": { + "post": { + "tags": [ + "Documents" + ], + "description": "Upload a valid URL for AnythingLLM to scrape and prepare for embedding.", + "parameters": [ + { + "name": "Authorization", + "in": "header", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "type": "object", + "example": { + "success": true, + "error": null, + "documents": [ + { + "id": "c530dbe6-bff1-4b9e-b87f-710d539d20bc", + "url": "file://useanything_com.html", + "title": "useanything_com.html", + "docAuthor": "no author found", + "description": "No description found.", + "docSource": "URL link uploaded by the user.", + "chunkSource": "https:useanything.com.html", + "published": "1/16/2024, 3:46:33 PM", + "wordCount": 252, + "pageContent": "AnythingLLM is the best....", + "token_count_estimate": 447, + "location": "custom-documents/url-useanything_com-c530dbe6-bff1-4b9e-b87f-710d539d20bc.json" + } + ] + } + } + } + } + }, + "403": { + "description": "Forbidden", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + } + } + }, + "500": { + "description": "Internal Server Error" + } + }, + "requestBody": { + "description": "Link of web address to be scraped.", + "required": true, + "type": "file", + "content": { + "application/json": { + "schema": { + "type": "object", + "example": { + "link": "https://useanything.com" + } + } + } + } + } + } + }, "/v1/documents": { "get": { "tags": [ @@ -1593,9 +1690,11 @@ "content": { "application/json": { "example": { - "adds": [], + "adds": [ + "custom-documents/my-pdf.pdf-hash.json" + ], "deletes": [ - "custom-documents/anythingllm-hash.json" + "custom-documents/anythingllm.txt-hash.json" ] } } diff --git a/server/utils/files/documentProcessor.js 
b/server/utils/files/documentProcessor.js index 5239a8708..27d0f5f2b 100644 --- a/server/utils/files/documentProcessor.js +++ b/server/utils/files/documentProcessor.js @@ -35,7 +35,7 @@ async function processDocument(filename = "") { .then((res) => res) .catch((e) => { console.log(e.message); - return { success: false, reason: e.message }; + return { success: false, reason: e.message, documents: [] }; }); } @@ -55,7 +55,7 @@ async function processLink(link = "") { .then((res) => res) .catch((e) => { console.log(e.message); - return { success: false, reason: e.message }; + return { success: false, reason: e.message, documents: [] }; }); } From bf503ee0e9c9e8fe0164090612a6315a4debeb29 Mon Sep 17 00:00:00 2001 From: Sean Hatfield Date: Tue, 16 Jan 2024 18:23:51 -0800 Subject: [PATCH 36/41] add check to skip empty messages (#602) * add check to skip empty messages * add comment explaining prisma + sqlite not supporting createMany() --- server/models/welcomeMessages.js | 3 +++ 1 file changed, 3 insertions(+) diff --git a/server/models/welcomeMessages.js b/server/models/welcomeMessages.js index 43e2d3f96..88393f36c 100644 --- a/server/models/welcomeMessages.js +++ b/server/models/welcomeMessages.js @@ -31,7 +31,10 @@ const WelcomeMessages = { await prisma.welcome_messages.deleteMany({}); // Delete all existing messages // Create new messages + // We create each message individually because prisma + // with sqlite does not support createMany() for (const [index, message] of messages.entries()) { + if (!message.response) continue; await prisma.welcome_messages.create({ data: { user: message.user, From 90df37582bcccba53282420eb61c8038c4699609 Mon Sep 17 00:00:00 2001 From: Sean Hatfield Date: Wed, 17 Jan 2024 12:59:25 -0800 Subject: [PATCH 37/41] Per workspace model selection (#582) * WIP model selection per workspace (migrations and openai saves properly * revert OpenAiOption * add support for models per workspace for anthropic, localAi, ollama, openAi, and togetherAi * remove unneeded comments * update logic for when LLMProvider is reset, reset Ai provider files with master * remove frontend/api reset of workspace chat and move logic to updateENV add postUpdate callbacks to envs * set preferred model for chat on class instantiation * remove extra param * linting * remove unused var * refactor chat model selection on workspace * linting * add fallback for base path to localai models --------- Co-authored-by: timothycarambat --- .../Settings/ChatModelPreference/index.jsx | 120 ++++++++++++++++++ .../useGetProviderModels.js | 49 +++++++ .../Modals/MangeWorkspace/Settings/index.jsx | 8 +- .../Modals/MangeWorkspace/index.jsx | 1 + .../GeneralSettings/LLMPreference/index.jsx | 6 +- server/endpoints/api/system/index.js | 2 +- server/endpoints/system.js | 6 +- server/models/workspace.js | 15 +++ .../20240113013409_init/migration.sql | 2 + server/prisma/schema.prisma | 1 + server/utils/AiProviders/anthropic/index.js | 5 +- server/utils/AiProviders/azureOpenAi/index.js | 2 +- server/utils/AiProviders/gemini/index.js | 5 +- server/utils/AiProviders/lmStudio/index.js | 4 +- server/utils/AiProviders/localAi/index.js | 4 +- server/utils/AiProviders/native/index.js | 4 +- server/utils/AiProviders/ollama/index.js | 4 +- server/utils/AiProviders/openAi/index.js | 5 +- server/utils/AiProviders/togetherAi/index.js | 4 +- server/utils/chats/index.js | 2 +- server/utils/chats/stream.js | 2 +- server/utils/helpers/customModels.js | 13 +- server/utils/helpers/index.js | 20 +-- server/utils/helpers/updateENV.js | 32 
+++-- 24 files changed, 263 insertions(+), 53 deletions(-) create mode 100644 frontend/src/components/Modals/MangeWorkspace/Settings/ChatModelPreference/index.jsx create mode 100644 frontend/src/components/Modals/MangeWorkspace/Settings/ChatModelPreference/useGetProviderModels.js create mode 100644 server/prisma/migrations/20240113013409_init/migration.sql diff --git a/frontend/src/components/Modals/MangeWorkspace/Settings/ChatModelPreference/index.jsx b/frontend/src/components/Modals/MangeWorkspace/Settings/ChatModelPreference/index.jsx new file mode 100644 index 000000000..ea03c09a9 --- /dev/null +++ b/frontend/src/components/Modals/MangeWorkspace/Settings/ChatModelPreference/index.jsx @@ -0,0 +1,120 @@ +import useGetProviderModels, { + DISABLED_PROVIDERS, +} from "./useGetProviderModels"; + +export default function ChatModelSelection({ + settings, + workspace, + setHasChanges, +}) { + const { defaultModels, customModels, loading } = useGetProviderModels( + settings?.LLMProvider + ); + if (DISABLED_PROVIDERS.includes(settings?.LLMProvider)) return null; + + if (loading) { + return ( +
+
+ +

+ The specific chat model that will be used for this workspace. If + empty, will use the system LLM preference. +

+
+ +
+ ); + } + + return ( +
+
+ +

+ The specific chat model that will be used for this workspace. If + empty, will use the system LLM preference. +

+
+ + +
+ ); +} diff --git a/frontend/src/components/Modals/MangeWorkspace/Settings/ChatModelPreference/useGetProviderModels.js b/frontend/src/components/Modals/MangeWorkspace/Settings/ChatModelPreference/useGetProviderModels.js new file mode 100644 index 000000000..eae1b4adc --- /dev/null +++ b/frontend/src/components/Modals/MangeWorkspace/Settings/ChatModelPreference/useGetProviderModels.js @@ -0,0 +1,49 @@ +import System from "@/models/system"; +import { useEffect, useState } from "react"; + +// Providers which cannot use this feature for workspace<>model selection +export const DISABLED_PROVIDERS = ["azure", "lmstudio"]; +const PROVIDER_DEFAULT_MODELS = { + openai: ["gpt-3.5-turbo", "gpt-4", "gpt-4-1106-preview", "gpt-4-32k"], + gemini: ["gemini-pro"], + anthropic: ["claude-2", "claude-instant-1"], + azure: [], + lmstudio: [], + localai: [], + ollama: [], + togetherai: [], + native: [], +}; + +// For togetherAi, which has a large model list - we subgroup the options +// by their creator organization (eg: Meta, Mistral, etc) +// which makes selection easier to read. +function groupModels(models) { + return models.reduce((acc, model) => { + acc[model.organization] = acc[model.organization] || []; + acc[model.organization].push(model); + return acc; + }, {}); +} + +export default function useGetProviderModels(provider = null) { + const [defaultModels, setDefaultModels] = useState([]); + const [customModels, setCustomModels] = useState([]); + const [loading, setLoading] = useState(true); + + useEffect(() => { + async function fetchProviderModels() { + if (!provider) return; + const { models = [] } = await System.customModels(provider); + if (PROVIDER_DEFAULT_MODELS.hasOwnProperty(provider)) + setDefaultModels(PROVIDER_DEFAULT_MODELS[provider]); + provider === "togetherai" + ? setCustomModels(groupModels(models)) + : setCustomModels(models); + setLoading(false); + } + fetchProviderModels(); + }, [provider]); + + return { defaultModels, customModels, loading }; +} diff --git a/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx b/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx index 2fce91e1f..a3089d688 100644 --- a/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx +++ b/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx @@ -6,6 +6,7 @@ import System from "../../../../models/system"; import PreLoader from "../../../Preloader"; import { useParams } from "react-router-dom"; import showToast from "../../../../utils/toast"; +import ChatModelPreference from "./ChatModelPreference"; // Ensure that a type is correct before sending the body // to the backend. @@ -26,7 +27,7 @@ function castToType(key, value) { return definitions[key].cast(value); } -export default function WorkspaceSettings({ active, workspace }) { +export default function WorkspaceSettings({ active, workspace, settings }) { const { slug } = useParams(); const formEl = useRef(null); const [saving, setSaving] = useState(false); @@ -99,6 +100,11 @@ export default function WorkspaceSettings({ active, workspace }) {
+
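For context, the groupModels helper introduced earlier in this patch returns a map keyed by organization, which is the shape the workspace model dropdown iterates over. A hedged usage sketch — the model entries below are illustrative and not an actual TogetherAI listing:

// Illustrative only — shows the grouped shape, not real API data.
const grouped = groupModels([
  { id: "mistralai/Mixtral-8x7B-Instruct-v0.1", organization: "mistralai" },
  { id: "togethercomputer/llama-2-70b-chat", organization: "togethercomputer" },
  { id: "mistralai/Mistral-7B-Instruct-v0.2", organization: "mistralai" },
]);
// => { mistralai: [ ...two entries ], togethercomputer: [ ...one entry ] }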
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx index 287716222..bd6ae511d 100644 --- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx +++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx @@ -30,19 +30,17 @@ export default function GeneralLLMPreference() { const [hasChanges, setHasChanges] = useState(false); const [settings, setSettings] = useState(null); const [loading, setLoading] = useState(true); - const [searchQuery, setSearchQuery] = useState(""); const [filteredLLMs, setFilteredLLMs] = useState([]); const [selectedLLM, setSelectedLLM] = useState(null); - const isHosted = window.location.hostname.includes("useanything.com"); const handleSubmit = async (e) => { e.preventDefault(); const form = e.target; - const data = {}; + const data = { LLMProvider: selectedLLM }; const formData = new FormData(form); - data.LLMProvider = selectedLLM; + for (var [key, value] of formData.entries()) data[key] = value; const { error } = await System.updateSystem(data); setSaving(true); diff --git a/server/endpoints/api/system/index.js b/server/endpoints/api/system/index.js index 3548c3068..b18019b14 100644 --- a/server/endpoints/api/system/index.js +++ b/server/endpoints/api/system/index.js @@ -139,7 +139,7 @@ function apiSystemEndpoints(app) { */ try { const body = reqBody(request); - const { newValues, error } = updateENV(body); + const { newValues, error } = await updateENV(body); if (process.env.NODE_ENV === "production") await dumpENV(); response.status(200).json({ newValues, error }); } catch (e) { diff --git a/server/endpoints/system.js b/server/endpoints/system.js index 15db895ad..e699cf84c 100644 --- a/server/endpoints/system.js +++ b/server/endpoints/system.js @@ -290,7 +290,7 @@ function systemEndpoints(app) { } const body = reqBody(request); - const { newValues, error } = updateENV(body); + const { newValues, error } = await updateENV(body); if (process.env.NODE_ENV === "production") await dumpENV(); response.status(200).json({ newValues, error }); } catch (e) { @@ -312,7 +312,7 @@ function systemEndpoints(app) { } const { usePassword, newPassword } = reqBody(request); - const { error } = updateENV( + const { error } = await updateENV( { AuthToken: usePassword ? newPassword : "", JWTSecret: usePassword ? 
v4() : "", @@ -355,7 +355,7 @@ function systemEndpoints(app) { message_limit: 25, }); - updateENV( + await updateENV( { AuthToken: "", JWTSecret: process.env.JWT_SECRET || v4(), diff --git a/server/models/workspace.js b/server/models/workspace.js index 9139c25e9..6de8053e9 100644 --- a/server/models/workspace.js +++ b/server/models/workspace.js @@ -14,6 +14,7 @@ const Workspace = { "lastUpdatedAt", "openAiPrompt", "similarityThreshold", + "chatModel", ], new: async function (name = null, creatorId = null) { @@ -191,6 +192,20 @@ const Workspace = { return { success: false, error: error.message }; } }, + + resetWorkspaceChatModels: async () => { + try { + await prisma.workspaces.updateMany({ + data: { + chatModel: null, + }, + }); + return { success: true, error: null }; + } catch (error) { + console.error("Error resetting workspace chat models:", error.message); + return { success: false, error: error.message }; + } + }, }; module.exports = { Workspace }; diff --git a/server/prisma/migrations/20240113013409_init/migration.sql b/server/prisma/migrations/20240113013409_init/migration.sql new file mode 100644 index 000000000..09b9448ec --- /dev/null +++ b/server/prisma/migrations/20240113013409_init/migration.sql @@ -0,0 +1,2 @@ +-- AlterTable +ALTER TABLE "workspaces" ADD COLUMN "chatModel" TEXT; diff --git a/server/prisma/schema.prisma b/server/prisma/schema.prisma index e9aa8a8a5..2f632a46a 100644 --- a/server/prisma/schema.prisma +++ b/server/prisma/schema.prisma @@ -93,6 +93,7 @@ model workspaces { lastUpdatedAt DateTime @default(now()) openAiPrompt String? similarityThreshold Float? @default(0.25) + chatModel String? workspace_users workspace_users[] documents workspace_documents[] } diff --git a/server/utils/AiProviders/anthropic/index.js b/server/utils/AiProviders/anthropic/index.js index 709333231..17f2abc4a 100644 --- a/server/utils/AiProviders/anthropic/index.js +++ b/server/utils/AiProviders/anthropic/index.js @@ -2,7 +2,7 @@ const { v4 } = require("uuid"); const { chatPrompt } = require("../../chats"); class AnthropicLLM { - constructor(embedder = null) { + constructor(embedder = null, modelPreference = null) { if (!process.env.ANTHROPIC_API_KEY) throw new Error("No Anthropic API key was set."); @@ -12,7 +12,8 @@ class AnthropicLLM { apiKey: process.env.ANTHROPIC_API_KEY, }); this.anthropic = anthropic; - this.model = process.env.ANTHROPIC_MODEL_PREF || "claude-2"; + this.model = + modelPreference || process.env.ANTHROPIC_MODEL_PREF || "claude-2"; this.limits = { history: this.promptWindowLimit() * 0.15, system: this.promptWindowLimit() * 0.15, diff --git a/server/utils/AiProviders/azureOpenAi/index.js b/server/utils/AiProviders/azureOpenAi/index.js index 185dac021..f59fc51fa 100644 --- a/server/utils/AiProviders/azureOpenAi/index.js +++ b/server/utils/AiProviders/azureOpenAi/index.js @@ -2,7 +2,7 @@ const { AzureOpenAiEmbedder } = require("../../EmbeddingEngines/azureOpenAi"); const { chatPrompt } = require("../../chats"); class AzureOpenAiLLM { - constructor(embedder = null) { + constructor(embedder = null, _modelPreference = null) { const { OpenAIClient, AzureKeyCredential } = require("@azure/openai"); if (!process.env.AZURE_OPENAI_ENDPOINT) throw new Error("No Azure API endpoint was set."); diff --git a/server/utils/AiProviders/gemini/index.js b/server/utils/AiProviders/gemini/index.js index 03388e3e2..348c8f5ed 100644 --- a/server/utils/AiProviders/gemini/index.js +++ b/server/utils/AiProviders/gemini/index.js @@ -1,14 +1,15 @@ const { chatPrompt } = require("../../chats"); 
class GeminiLLM { - constructor(embedder = null) { + constructor(embedder = null, modelPreference = null) { if (!process.env.GEMINI_API_KEY) throw new Error("No Gemini API key was set."); // Docs: https://ai.google.dev/tutorials/node_quickstart const { GoogleGenerativeAI } = require("@google/generative-ai"); const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY); - this.model = process.env.GEMINI_LLM_MODEL_PREF || "gemini-pro"; + this.model = + modelPreference || process.env.GEMINI_LLM_MODEL_PREF || "gemini-pro"; this.gemini = genAI.getGenerativeModel({ model: this.model }); this.limits = { history: this.promptWindowLimit() * 0.15, diff --git a/server/utils/AiProviders/lmStudio/index.js b/server/utils/AiProviders/lmStudio/index.js index 28c107df0..614808034 100644 --- a/server/utils/AiProviders/lmStudio/index.js +++ b/server/utils/AiProviders/lmStudio/index.js @@ -2,7 +2,7 @@ const { chatPrompt } = require("../../chats"); // hybrid of openAi LLM chat completion for LMStudio class LMStudioLLM { - constructor(embedder = null) { + constructor(embedder = null, _modelPreference = null) { if (!process.env.LMSTUDIO_BASE_PATH) throw new Error("No LMStudio API Base Path was set."); @@ -12,7 +12,7 @@ class LMStudioLLM { }); this.lmstudio = new OpenAIApi(config); // When using LMStudios inference server - the model param is not required so - // we can stub it here. + // we can stub it here. LMStudio can only run one model at a time. this.model = "model-placeholder"; this.limits = { history: this.promptWindowLimit() * 0.15, diff --git a/server/utils/AiProviders/localAi/index.js b/server/utils/AiProviders/localAi/index.js index 84954c994..6623ac88e 100644 --- a/server/utils/AiProviders/localAi/index.js +++ b/server/utils/AiProviders/localAi/index.js @@ -1,7 +1,7 @@ const { chatPrompt } = require("../../chats"); class LocalAiLLM { - constructor(embedder = null) { + constructor(embedder = null, modelPreference = null) { if (!process.env.LOCAL_AI_BASE_PATH) throw new Error("No LocalAI Base Path was set."); @@ -15,7 +15,7 @@ class LocalAiLLM { : {}), }); this.openai = new OpenAIApi(config); - this.model = process.env.LOCAL_AI_MODEL_PREF; + this.model = modelPreference || process.env.LOCAL_AI_MODEL_PREF; this.limits = { history: this.promptWindowLimit() * 0.15, system: this.promptWindowLimit() * 0.15, diff --git a/server/utils/AiProviders/native/index.js b/server/utils/AiProviders/native/index.js index faac4fa03..66cc84d0c 100644 --- a/server/utils/AiProviders/native/index.js +++ b/server/utils/AiProviders/native/index.js @@ -10,11 +10,11 @@ const ChatLlamaCpp = (...args) => ); class NativeLLM { - constructor(embedder = null) { + constructor(embedder = null, modelPreference = null) { if (!process.env.NATIVE_LLM_MODEL_PREF) throw new Error("No local Llama model was set."); - this.model = process.env.NATIVE_LLM_MODEL_PREF || null; + this.model = modelPreference || process.env.NATIVE_LLM_MODEL_PREF || null; this.limits = { history: this.promptWindowLimit() * 0.15, system: this.promptWindowLimit() * 0.15, diff --git a/server/utils/AiProviders/ollama/index.js b/server/utils/AiProviders/ollama/index.js index 55205c23d..fce96f369 100644 --- a/server/utils/AiProviders/ollama/index.js +++ b/server/utils/AiProviders/ollama/index.js @@ -3,12 +3,12 @@ const { StringOutputParser } = require("langchain/schema/output_parser"); // Docs: https://github.com/jmorganca/ollama/blob/main/docs/api.md class OllamaAILLM { - constructor(embedder = null) { + constructor(embedder = null, modelPreference = null) { if 
(!process.env.OLLAMA_BASE_PATH) throw new Error("No Ollama Base Path was set."); this.basePath = process.env.OLLAMA_BASE_PATH; - this.model = process.env.OLLAMA_MODEL_PREF; + this.model = modelPreference || process.env.OLLAMA_MODEL_PREF; this.limits = { history: this.promptWindowLimit() * 0.15, system: this.promptWindowLimit() * 0.15, diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js index ccc7ba0e9..038d201d1 100644 --- a/server/utils/AiProviders/openAi/index.js +++ b/server/utils/AiProviders/openAi/index.js @@ -2,7 +2,7 @@ const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi"); const { chatPrompt } = require("../../chats"); class OpenAiLLM { - constructor(embedder = null) { + constructor(embedder = null, modelPreference = null) { const { Configuration, OpenAIApi } = require("openai"); if (!process.env.OPEN_AI_KEY) throw new Error("No OpenAI API key was set."); @@ -10,7 +10,8 @@ class OpenAiLLM { apiKey: process.env.OPEN_AI_KEY, }); this.openai = new OpenAIApi(config); - this.model = process.env.OPEN_MODEL_PREF || "gpt-3.5-turbo"; + this.model = + modelPreference || process.env.OPEN_MODEL_PREF || "gpt-3.5-turbo"; this.limits = { history: this.promptWindowLimit() * 0.15, system: this.promptWindowLimit() * 0.15, diff --git a/server/utils/AiProviders/togetherAi/index.js b/server/utils/AiProviders/togetherAi/index.js index df64c413e..44061dd0a 100644 --- a/server/utils/AiProviders/togetherAi/index.js +++ b/server/utils/AiProviders/togetherAi/index.js @@ -6,7 +6,7 @@ function togetherAiModels() { } class TogetherAiLLM { - constructor(embedder = null) { + constructor(embedder = null, modelPreference = null) { const { Configuration, OpenAIApi } = require("openai"); if (!process.env.TOGETHER_AI_API_KEY) throw new Error("No TogetherAI API key was set."); @@ -16,7 +16,7 @@ class TogetherAiLLM { apiKey: process.env.TOGETHER_AI_API_KEY, }); this.openai = new OpenAIApi(config); - this.model = process.env.TOGETHER_AI_MODEL_PREF; + this.model = modelPreference || process.env.TOGETHER_AI_MODEL_PREF; this.limits = { history: this.promptWindowLimit() * 0.15, system: this.promptWindowLimit() * 0.15, diff --git a/server/utils/chats/index.js b/server/utils/chats/index.js index 7fdb47344..d63de47d5 100644 --- a/server/utils/chats/index.js +++ b/server/utils/chats/index.js @@ -71,7 +71,7 @@ async function chatWithWorkspace( return await VALID_COMMANDS[command](workspace, message, uuid, user); } - const LLMConnector = getLLMProvider(); + const LLMConnector = getLLMProvider(workspace?.chatModel); const VectorDb = getVectorDbClass(); const { safe, reasons = [] } = await LLMConnector.isSafe(message); if (!safe) { diff --git a/server/utils/chats/stream.js b/server/utils/chats/stream.js index 04bb72b90..ceea8d7d2 100644 --- a/server/utils/chats/stream.js +++ b/server/utils/chats/stream.js @@ -30,7 +30,7 @@ async function streamChatWithWorkspace( return; } - const LLMConnector = getLLMProvider(); + const LLMConnector = getLLMProvider(workspace?.chatModel); const VectorDb = getVectorDbClass(); const { safe, reasons = [] } = await LLMConnector.isSafe(message); if (!safe) { diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js index 54976895e..87fe976ec 100644 --- a/server/utils/helpers/customModels.js +++ b/server/utils/helpers/customModels.js @@ -17,7 +17,7 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) { case "localai": return await localAIModels(basePath, apiKey); case "ollama": - return 
await ollamaAIModels(basePath, apiKey); + return await ollamaAIModels(basePath); case "togetherai": return await getTogetherAiModels(); case "native-llm": @@ -53,7 +53,7 @@ async function openAiModels(apiKey = null) { async function localAIModels(basePath = null, apiKey = null) { const { Configuration, OpenAIApi } = require("openai"); const config = new Configuration({ - basePath, + basePath: basePath || process.env.LOCAL_AI_BASE_PATH, apiKey: apiKey || process.env.LOCAL_AI_API_KEY, }); const openai = new OpenAIApi(config); @@ -70,13 +70,14 @@ async function localAIModels(basePath = null, apiKey = null) { return { models, error: null }; } -async function ollamaAIModels(basePath = null, _apiKey = null) { +async function ollamaAIModels(basePath = null) { let url; try { - new URL(basePath); - if (basePath.split("").slice(-1)?.[0] === "/") + let urlPath = basePath ?? process.env.OLLAMA_BASE_PATH; + new URL(urlPath); + if (urlPath.split("").slice(-1)?.[0] === "/") throw new Error("BasePath Cannot end in /!"); - url = basePath; + url = urlPath; } catch { return { models: [], error: "Not a valid URL." }; } diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js index 1685acc1a..2b1f3dacf 100644 --- a/server/utils/helpers/index.js +++ b/server/utils/helpers/index.js @@ -24,37 +24,37 @@ function getVectorDbClass() { } } -function getLLMProvider() { +function getLLMProvider(modelPreference = null) { const vectorSelection = process.env.LLM_PROVIDER || "openai"; const embedder = getEmbeddingEngineSelection(); switch (vectorSelection) { case "openai": const { OpenAiLLM } = require("../AiProviders/openAi"); - return new OpenAiLLM(embedder); + return new OpenAiLLM(embedder, modelPreference); case "azure": const { AzureOpenAiLLM } = require("../AiProviders/azureOpenAi"); - return new AzureOpenAiLLM(embedder); + return new AzureOpenAiLLM(embedder, modelPreference); case "anthropic": const { AnthropicLLM } = require("../AiProviders/anthropic"); - return new AnthropicLLM(embedder); + return new AnthropicLLM(embedder, modelPreference); case "gemini": const { GeminiLLM } = require("../AiProviders/gemini"); - return new GeminiLLM(embedder); + return new GeminiLLM(embedder, modelPreference); case "lmstudio": const { LMStudioLLM } = require("../AiProviders/lmStudio"); - return new LMStudioLLM(embedder); + return new LMStudioLLM(embedder, modelPreference); case "localai": const { LocalAiLLM } = require("../AiProviders/localAi"); - return new LocalAiLLM(embedder); + return new LocalAiLLM(embedder, modelPreference); case "ollama": const { OllamaAILLM } = require("../AiProviders/ollama"); - return new OllamaAILLM(embedder); + return new OllamaAILLM(embedder, modelPreference); case "togetherai": const { TogetherAiLLM } = require("../AiProviders/togetherAi"); - return new TogetherAiLLM(embedder); + return new TogetherAiLLM(embedder, modelPreference); case "native": const { NativeLLM } = require("../AiProviders/native"); - return new NativeLLM(embedder); + return new NativeLLM(embedder, modelPreference); default: throw new Error("ENV: No LLM_PROVIDER value found in environment!"); } diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js index c699cf2df..5c43da519 100644 --- a/server/utils/helpers/updateENV.js +++ b/server/utils/helpers/updateENV.js @@ -2,6 +2,7 @@ const KEY_MAPPING = { LLMProvider: { envKey: "LLM_PROVIDER", checks: [isNotEmpty, supportedLLM], + postUpdate: [wipeWorkspaceModelPreference], }, // OpenAI Settings OpenAiKey: { @@ -362,11 +363,20 @@ function 
validDockerizedUrl(input = "") { return null; } +// If the LLMProvider has changed we need to reset all workspace model preferences to +// null since the provider<>model name combination will be invalid for whatever the new +// provider is. +async function wipeWorkspaceModelPreference(key, prev, next) { + if (prev === next) return; + const { Workspace } = require("../../models/workspace"); + await Workspace.resetWorkspaceChatModels(); +} + // This will force update .env variables which for any which reason were not able to be parsed or // read from an ENV file as this seems to be a complicating step for many so allowing people to write // to the process will at least alleviate that issue. It does not perform comprehensive validity checks or sanity checks // and is simply for debugging when the .env not found issue many come across. -function updateENV(newENVs = {}, force = false) { +async function updateENV(newENVs = {}, force = false) { let error = ""; const validKeys = Object.keys(KEY_MAPPING); const ENV_KEYS = Object.keys(newENVs).filter( @@ -374,21 +384,25 @@ function updateENV(newENVs = {}, force = false) { ); const newValues = {}; - ENV_KEYS.forEach((key) => { - const { envKey, checks } = KEY_MAPPING[key]; - const value = newENVs[key]; + for (const key of ENV_KEYS) { + const { envKey, checks, postUpdate = [] } = KEY_MAPPING[key]; + const prevValue = process.env[envKey]; + const nextValue = newENVs[key]; const errors = checks - .map((validityCheck) => validityCheck(value, force)) + .map((validityCheck) => validityCheck(nextValue, force)) .filter((err) => typeof err === "string"); if (errors.length > 0) { error += errors.join("\n"); - return; + break; } - newValues[key] = value; - process.env[envKey] = value; - }); + newValues[key] = nextValue; + process.env[envKey] = nextValue; + + for (const postUpdateFunc of postUpdate) + await postUpdateFunc(key, prevValue, nextValue); + } return { newValues, error: error?.length > 0 ? 
error : false }; } From c2c8fe97562202ef028d9f57a220e84655502f19 Mon Sep 17 00:00:00 2001 From: Sean Hatfield Date: Wed, 17 Jan 2024 14:42:05 -0800 Subject: [PATCH 38/41] add support for mistral api (#610) * add support for mistral api * update docs to show support for Mistral * add default temp to all providers, suggest different results per provider --------- Co-authored-by: timothycarambat --- README.md | 1 + docker/.env.example | 4 + .../LLMSelection/MistralOptions/index.jsx | 103 ++++++++++ .../Modals/MangeWorkspace/Settings/index.jsx | 18 +- frontend/src/media/llmprovider/mistral.jpeg | Bin 0 -> 4542 bytes .../GeneralSettings/LLMPreference/index.jsx | 11 +- .../Steps/DataHandling/index.jsx | 8 + .../Steps/LLMPreference/index.jsx | 9 + server/.env.example | 4 + server/models/systemSettings.js | 12 ++ server/utils/AiProviders/anthropic/index.js | 1 + server/utils/AiProviders/azureOpenAi/index.js | 5 +- server/utils/AiProviders/gemini/index.js | 1 + server/utils/AiProviders/lmStudio/index.js | 5 +- server/utils/AiProviders/localAi/index.js | 5 +- server/utils/AiProviders/mistral/index.js | 184 ++++++++++++++++++ server/utils/AiProviders/native/index.js | 5 +- server/utils/AiProviders/ollama/index.js | 5 +- server/utils/AiProviders/openAi/index.js | 5 +- server/utils/AiProviders/togetherAi/index.js | 5 +- server/utils/chats/index.js | 2 +- server/utils/chats/stream.js | 4 +- server/utils/helpers/customModels.js | 23 +++ server/utils/helpers/index.js | 4 + server/utils/helpers/updateENV.js | 10 + 25 files changed, 412 insertions(+), 22 deletions(-) create mode 100644 frontend/src/components/LLMSelection/MistralOptions/index.jsx create mode 100644 frontend/src/media/llmprovider/mistral.jpeg create mode 100644 server/utils/AiProviders/mistral/index.js diff --git a/README.md b/README.md index 4249c42bc..6e3df0df4 100644 --- a/README.md +++ b/README.md @@ -71,6 +71,7 @@ Some cool features of AnythingLLM - [LM Studio (all models)](https://lmstudio.ai) - [LocalAi (all models)](https://localai.io/) - [Together AI (chat models)](https://www.together.ai/) +- [Mistral](https://mistral.ai/) **Supported Embedding models:** diff --git a/docker/.env.example b/docker/.env.example index 5bd909af6..8d33a809d 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -44,6 +44,10 @@ GID='1000' # TOGETHER_AI_API_KEY='my-together-ai-key' # TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1' +# LLM_PROVIDER='mistral' +# MISTRAL_API_KEY='example-mistral-ai-api-key' +# MISTRAL_MODEL_PREF='mistral-tiny' + ########################################### ######## Embedding API SElECTION ########## ########################################### diff --git a/frontend/src/components/LLMSelection/MistralOptions/index.jsx b/frontend/src/components/LLMSelection/MistralOptions/index.jsx new file mode 100644 index 000000000..d5c666415 --- /dev/null +++ b/frontend/src/components/LLMSelection/MistralOptions/index.jsx @@ -0,0 +1,103 @@ +import { useState, useEffect } from "react"; +import System from "@/models/system"; + +export default function MistralOptions({ settings }) { + const [inputValue, setInputValue] = useState(settings?.MistralApiKey); + const [mistralKey, setMistralKey] = useState(settings?.MistralApiKey); + + return ( +
+
+ + setInputValue(e.target.value)} + onBlur={() => setMistralKey(inputValue)} + /> +
+ +
+ ); +} + +function MistralModelSelection({ apiKey, settings }) { + const [customModels, setCustomModels] = useState([]); + const [loading, setLoading] = useState(true); + + useEffect(() => { + async function findCustomModels() { + if (!apiKey) { + setCustomModels([]); + setLoading(false); + return; + } + setLoading(true); + const { models } = await System.customModels( + "mistral", + typeof apiKey === "boolean" ? null : apiKey + ); + setCustomModels(models || []); + setLoading(false); + } + findCustomModels(); + }, [apiKey]); + + if (loading || customModels.length == 0) { + return ( +
+ + +
+ ); + } + + return ( +
+ + +
+ ); +} diff --git a/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx b/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx index a3089d688..da0e7b9f0 100644 --- a/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx +++ b/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx @@ -27,11 +27,21 @@ function castToType(key, value) { return definitions[key].cast(value); } +function recommendedSettings(provider = null) { + switch (provider) { + case "mistral": + return { temp: 0 }; + default: + return { temp: 0.7 }; + } +} + export default function WorkspaceSettings({ active, workspace, settings }) { const { slug } = useParams(); const formEl = useRef(null); const [saving, setSaving] = useState(false); const [hasChanges, setHasChanges] = useState(false); + const defaults = recommendedSettings(settings?.LLMProvider); const handleUpdate = async (e) => { setSaving(true); @@ -143,20 +153,20 @@ export default function WorkspaceSettings({ active, workspace, settings }) { This setting controls how "random" or dynamic your chat responses will be.
-                The higher the number (2.0 maximum) the more random and
+                The higher the number (1.0 maximum) the more random and
                 incoherent.
-                Recommended: 0.7
+                Recommended: {defaults.temp}
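            {/* defaults.temp comes from the recommendedSettings(settings?.LLMProvider) helper added above:
                0 when the provider is "mistral", 0.7 otherwise. A saved workspace value
                (workspace?.openAiTemp) still takes precedence in the input below. */}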

e.target.blur()} - defaultValue={workspace?.openAiTemp ?? 0.7} + defaultValue={workspace?.openAiTemp ?? defaults.temp} className="bg-zinc-900 text-white text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5" placeholder="0.7" required={true} diff --git a/frontend/src/media/llmprovider/mistral.jpeg b/frontend/src/media/llmprovider/mistral.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..1019f495d4d690dd639aa9f4e5751c403b2eff27 GIT binary patch literal 4542 zcmb7Hc|4Te+rMWu%*f0z_F}9tmSJp(7$R%7QrXHH8A7DRz9ppW*|Vh3gzPDW?6Rd4 zDoF}ik~Q0VPtViueSV+k{pUS%eLi#D=eo{y&iy^#`#RUzeY-mjU=4KjbpZ$j03hHG z*c}D50T>j@5U{~PL7)%_I2^&k#Dqj)STGng3mVP32ajdlgWH2fV>z)nb`Ao8fMMg} z=H%eUa}YQfK_D=23>?9XKrnN#qFFiq-)*-Uz@dN+paTXG0-!hu3%UU_Zn74>JPE1cyOUklhgg0|UomVOa20s|Ejz{^vi5tB^UVY1V)2 zCr{;yx;W(HxAHu%8vx3Zv`o*HEtI(&Wv#j|UnyaJz^-{00OJJAcD60YeXH|ugf;l8 zZy7%ACvFZS7CdguMa#Ex9Vm%=$2San)9t@c*Y9mqMa+njkD8&BEBO@cXc+(qsJ`$& zUdVkh4n|?4eC%?%n|3ty{N+M%@8XwjeRLk-LsdZfr|UTzR-L?xrU0-p{B=6EY4j{_ z>cP^vjD(uifyQTv0m9Et3JVn%*xj6dSH1tk(@JxtSFPR^X$J9U8V5B2w(Lvw>uR=d z2yvaO{Au^jziMe#7+WY)zS!o~Jteo<7%w9l+p@W&XLJ_cW*kSiG=FdOlIsdL0Qp&F zTQTgTcT`(xOqEa6nwHW|DH!0d$MwFJGVSpXc0X|P`XR6R#w$+ zv>~HUp1mR9v{$lg!gI{sFU*|y=7t|U#uw<+NxH`Dleny+c!2nzJgz-;F7AGHQMYCM z+iv{VA&pmi5p{)YmKRjs#qK%Dz_tJAl`3$` zoN&pr{DRn08(LveaE9nl@;`U+UpgDe3@LE8U`Qksr1js_fx?jh1cqP|#By*dC@K*s zczGO|op{CzWC{u-5Cj3;1xyP|#YK`{ytX``ma&mgo6e`1!6M~8Jllf$iJd+5>1NN3 zCmo|bH$r4@$(~-5418BE`na)n;m*~H@ad<)W5sJO&r@f9eQXS!Z(}Q3*#&&4Y~uwl zpH`lCr7$&JFV-DN_CRQJNcHOV6Jplq-8`Po?o7rA{OGHENSn=6 zo$9TxeCSHy(z@}$ru(wX!;GZEYW3wjzJh0M-VH1Hz0AIF)}~cpgTx3fyz#7GpuF)- z(z_4O=Jj^YFEf3Ytu9>kFU#8NkaP)hpa)S2aj@@AA4I0>D}|)p<~oI=2#`5@5TX3C z{2~=e*c7q6!u)zK>H@=)_WWep>QTl54VKo$H&P#{KQ!z7#55}Nc{b>mKqa1!Z+N?t z)IFX%WmUdfe?&Y&fYxx6zWFVvM~`;?^qR=f5v`2NSmgwmI2%-CF0E?G;a(1|kUgwj z!E9>a$Zb?*9*>?$h-K}G$U<-g4vEFH69mo7y*cFMDGF2z`-HqEPGO%4O)b;k zGy;>624u%cP*pdqUWBjYvDo27ZpF(CjeCN%gSTp8BSY@mE=+3kE!<8!al0!tNgAVsg!;&X^W@9|=>3&WWK@&#_hkp!? zxhUIJ-Q|kr)UJ^db;HE(0_xTp)vX(tlNKYV*0)MLpJwSwNq*DK?2Bc2TvwNEEp^%! 
zb+Bc%UdkQgNb~cKn%_!p=TuSgmn|l(-qf*n8#)mpHgEhDo!D01E7s0&;GjtxJ!7@q z8vVf&(z06hGW&Dwkd$;u*uxTK&Baei?_bO)XGv4eUYc7$qf3iRK1Wz|?DbY_-zozE zr{Dkp0f93y!4Q8wX9x&54uNDB#Nr7QGw(bOqMW?Bg}qPHfPyAf`!Bem47j%^*Eohw z*ihFJxJOrWp9E?^=Zu1Aeh;;(<8IE~+CqM;+WtJf&gIgdqUf0%IcuKoA+v50C`KNV znJeJ!vv<8!!jt<}>|^DSQ{rju0h#`r$lEh_sUOgXa)P2jr!Zp~bvs1rz&77A{b|Q5 z@y_`x!FHL&Py7>{bB@P+73S9C)?@aNP|>ju6YqDYS+iNQK~ID}65yQq zk*8Oeoc3H1Y1nExV0zK1Su)dw@yxo`kZG*jojG^A`834;mYa3Tdbg}sqm`6#SN*lx zb6@gb{FD-iie>A0@`#1Lzmds>b}CwI)J#0u%$nKS(jwDin$7^Su+X34yQ+ zocW^vcaMCqM4T|n;QI;FG4UZ!_8Dyaim8n1H}ZybES=5sB%ZO>xU zi>G*#t22@xO3=oK?mc+%rSGup(??1%Ewu6FK?Oe}eXk12h093zZsCdUzzHdrQ7ds(~uMc7VVN2jz( zXYnP^>+P$=bCF_BEbQAH$NHsjzZzGGC)Su%7^=opJfZokl)C$LO4f9r(bm^~lj=us zlf6Ho60yD>p?Xy5@S`ef)Wn?B2Vwc|G0y-U`(0pzVU8!pRV~%stqc=YCs(>|i$vQr z58jkRC+BE^z(NC7h)b`!z=fcamz!S}EGH5#pbgYs$e3SdkL2z}g~W&9e;X9>?H@gIaL`I+pfN0GQ}1C2Xv!}meaA9c6i(w5GR!RpKDbToz@ za%71jU?j2aI|__dgc@8qb8p%kw1iX><&zjQBP6E7a__cs3H^c&MRb{vqSrevW+>VD z3>v)`g4go8ygdDy*l>P~uV%Xmvw5GEsa2|)<_)cb6ZPFBMXhafLu8xIe)!`7I%1R_ z=y^zJm@G36_x)G^0)-cuXF_Pxg zb?&aB<{eyv>Llq_ftt9$33U`~x`>(f+@_$)g80^_qt)f@f<1q<#$dvF7P5u!*Ih;P zjACPp_=7xzmr<%8#%Cthpii|4Gyi@J3v7Ah;%A{+oPDgf-mll2+;wR*)wX(;c-!jbGw*QAs z`j!!U#s2bbBJ(kJvQKJQ6*k%*yBva=5UGQ!ab0gCAKW_@o##Dq3Qe_Q>DQe<$cP3= zIbE^tv~qkCQ${9@S*C<_B6Tj7Cd`uznw-Lg>dQ5;x71krNt!v7 z)had<{xqygaP-?V_l%}{b^$KRmhLQYDEN9mec73{F#Bi=x zPIeR3j)-}Fa8{&jQODEPb8YCc1MtY^J5l>?jb!bOtHY1$4^C(q%5|KV`POWxwxt8@ z*gtL>l4+ed9Hh-yaE`W;6 z+DDu4kFMaT;r9oY0|FcR)RuC&V13C;WokdW5B=!dhRp{2>{D&o1=JR48$O8#t*(d#961 z<^3E;;h8Gx2^9ftEAP`)E5$9zS+lmO#1y9g%?c$k zyf~=2@4(n}^UDLCUyTi`nl`7qE=t97T(l6~=}Xs>v(^es>qU<_8=T40e<)_S9b2$B zQ}}tLbqK=p!U9=al7Lc_@Qc8#Wo`C&?$tr2uvq4nfB;DR1^|hG|Ea$J006!})huDa zJ2dZyrfJis-=8Ss6vu9L@n`IdR>-9?RZ=-`n=YYQ@g{e|Yas+^iq$x$0nD|hKIo)j z>dYSPL19%oX2Ek`ZfqL$hUVHA`7f%0^z5X9_DeHftqY$Y`O!k&x5yVALn_}eajO`Q z$rc(POpCqx@A!*R%}8Jw+YjD8z|H+DK|yc;k!)ta_ya3PO~@M%tndzP`rn&}1{S<|^z^$% zB%}CTqw&fP&-G@lb86L%<0AB(n{@=20Bp_Kk%VTilU>|q5Sf)ohEdL5L{(2&Xoa>% z6_c5AWr{5-J#5mB9@Wk2` z;A~6>C<^CxQj>07I6NpP9TCm_C7XRa(cs+u=*^7RO?T%4HI{obP?wa9j7(3AeAn_>w#$rT%uRbT$kkdRXe35 zbUj1`=Xg!6IB^OkB^2<-&7&kVny4W*X;+!jcYU)=crDn=F6nPI(EHqI2)3^fO)?`F znYh1qT7BA2Y31PLd~2g}Wg3=8i^vZ?CVujk2tKB(bMEScoZ-h(`S9rUts%Eg1x;^l zqnO8<+6f0Pn^;{JW&TRn^EhZXD_%YS=&Ic}-N`Ud>J?9$fHb}g-94%@;#h;_Cx)+^ zm&fWhWRw-+`2B2-3Z_h7N-K@*8VI8d*07y>t9~Cfg1UC~nvt=Kefh

GOhkykkrx z5<$gl4%{{1&%57c5Qv$XoY%TcILb6UdoL+VK0U}ckiTqHx51ZU zmwjXW&11_L&eEgCFq@TP2dU4yzzlh^iUi9p_NpSM8p1Mj^7zmjK?2}Op8*u3nr2;h Kf#<=y@BRlu0mfYb literal 0 HcmV?d00001 diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx index bd6ae511d..1efa818d3 100644 --- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx +++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx @@ -12,6 +12,7 @@ import OllamaLogo from "@/media/llmprovider/ollama.png"; import LMStudioLogo from "@/media/llmprovider/lmstudio.png"; import LocalAiLogo from "@/media/llmprovider/localai.png"; import TogetherAILogo from "@/media/llmprovider/togetherai.png"; +import MistralLogo from "@/media/llmprovider/mistral.jpeg"; import PreLoader from "@/components/Preloader"; import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions"; import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions"; @@ -21,9 +22,10 @@ import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions"; import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions"; import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions"; import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions"; +import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions"; +import MistralOptions from "@/components/LLMSelection/MistralOptions"; import LLMItem from "@/components/LLMSelection/LLMItem"; import { MagnifyingGlass } from "@phosphor-icons/react"; -import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions"; export default function GeneralLLMPreference() { const [saving, setSaving] = useState(false); @@ -134,6 +136,13 @@ export default function GeneralLLMPreference() { options: , description: "Run open source models from Together AI.", }, + { + name: "Mistral", + value: "mistral", + logo: MistralLogo, + options: , + description: "Run open source models from Mistral AI.", + }, { name: "Native", value: "native", diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx index 281f1e8cd..3b0046382 100644 --- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx +++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx @@ -9,6 +9,7 @@ import OllamaLogo from "@/media/llmprovider/ollama.png"; import TogetherAILogo from "@/media/llmprovider/togetherai.png"; import LMStudioLogo from "@/media/llmprovider/lmstudio.png"; import LocalAiLogo from "@/media/llmprovider/localai.png"; +import MistralLogo from "@/media/llmprovider/mistral.jpeg"; import ChromaLogo from "@/media/vectordbs/chroma.png"; import PineconeLogo from "@/media/vectordbs/pinecone.png"; import LanceDbLogo from "@/media/vectordbs/lancedb.png"; @@ -91,6 +92,13 @@ const LLM_SELECTION_PRIVACY = { ], logo: TogetherAILogo, }, + mistral: { + name: "Mistral", + description: [ + "Your prompts and document text used in response creation are visible to Mistral", + ], + logo: MistralLogo, + }, }; const VECTOR_DB_PRIVACY = { diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx index dc060594e..9e8ab84a9 100644 --- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx +++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx @@ -9,6 +9,7 @@ import LMStudioLogo from "@/media/llmprovider/lmstudio.png"; import LocalAiLogo from 
"@/media/llmprovider/localai.png"; import TogetherAILogo from "@/media/llmprovider/togetherai.png"; import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png"; +import MistralLogo from "@/media/llmprovider/mistral.jpeg"; import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions"; import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions"; import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions"; @@ -17,6 +18,7 @@ import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions"; import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions"; import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions"; import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions"; +import MistralOptions from "@/components/LLMSelection/MistralOptions"; import LLMItem from "@/components/LLMSelection/LLMItem"; import System from "@/models/system"; import paths from "@/utils/paths"; @@ -109,6 +111,13 @@ export default function LLMPreference({ options: , description: "Run open source models from Together AI.", }, + { + name: "Mistral", + value: "mistral", + logo: MistralLogo, + options: , + description: "Run open source models from Mistral AI.", + }, { name: "Native", value: "native", diff --git a/server/.env.example b/server/.env.example index d060e0ab5..26c51927c 100644 --- a/server/.env.example +++ b/server/.env.example @@ -41,6 +41,10 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea # TOGETHER_AI_API_KEY='my-together-ai-key' # TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1' +# LLM_PROVIDER='mistral' +# MISTRAL_API_KEY='example-mistral-ai-api-key' +# MISTRAL_MODEL_PREF='mistral-tiny' + ########################################### ######## Embedding API SElECTION ########## ########################################### diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js index cd008d420..53d42f2e2 100644 --- a/server/models/systemSettings.js +++ b/server/models/systemSettings.js @@ -159,6 +159,18 @@ const SystemSettings = { AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF, } : {}), + ...(llmProvider === "mistral" + ? { + MistralApiKey: !!process.env.MISTRAL_API_KEY, + MistralModelPref: process.env.MISTRAL_MODEL_PREF, + + // For embedding credentials when mistral is selected. + OpenAiKey: !!process.env.OPEN_AI_KEY, + AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT, + AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY, + AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF, + } + : {}), ...(llmProvider === "native" ? { NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF, diff --git a/server/utils/AiProviders/anthropic/index.js b/server/utils/AiProviders/anthropic/index.js index 17f2abc4a..56d3a80f0 100644 --- a/server/utils/AiProviders/anthropic/index.js +++ b/server/utils/AiProviders/anthropic/index.js @@ -26,6 +26,7 @@ class AnthropicLLM { ); this.embedder = embedder; this.answerKey = v4().split("-")[0]; + this.defaultTemp = 0.7; } streamingEnabled() { diff --git a/server/utils/AiProviders/azureOpenAi/index.js b/server/utils/AiProviders/azureOpenAi/index.js index f59fc51fa..639ac102e 100644 --- a/server/utils/AiProviders/azureOpenAi/index.js +++ b/server/utils/AiProviders/azureOpenAi/index.js @@ -25,6 +25,7 @@ class AzureOpenAiLLM { "No embedding provider defined for AzureOpenAiLLM - falling back to AzureOpenAiEmbedder for embedding!" ); this.embedder = !embedder ? 
new AzureOpenAiEmbedder() : embedder; + this.defaultTemp = 0.7; } #appendContext(contextTexts = []) { @@ -93,7 +94,7 @@ class AzureOpenAiLLM { ); const textResponse = await this.openai .getChatCompletions(this.model, messages, { - temperature: Number(workspace?.openAiTemp ?? 0.7), + temperature: Number(workspace?.openAiTemp ?? this.defaultTemp), n: 1, }) .then((res) => { @@ -130,7 +131,7 @@ class AzureOpenAiLLM { this.model, messages, { - temperature: Number(workspace?.openAiTemp ?? 0.7), + temperature: Number(workspace?.openAiTemp ?? this.defaultTemp), n: 1, } ); diff --git a/server/utils/AiProviders/gemini/index.js b/server/utils/AiProviders/gemini/index.js index 348c8f5ed..63549fb8d 100644 --- a/server/utils/AiProviders/gemini/index.js +++ b/server/utils/AiProviders/gemini/index.js @@ -22,6 +22,7 @@ class GeminiLLM { "INVALID GEMINI LLM SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Gemini as your LLM." ); this.embedder = embedder; + this.defaultTemp = 0.7; // not used for Gemini } #appendContext(contextTexts = []) { diff --git a/server/utils/AiProviders/lmStudio/index.js b/server/utils/AiProviders/lmStudio/index.js index 614808034..08950a7b9 100644 --- a/server/utils/AiProviders/lmStudio/index.js +++ b/server/utils/AiProviders/lmStudio/index.js @@ -25,6 +25,7 @@ class LMStudioLLM { "INVALID LM STUDIO SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LMStudio as your LLM." ); this.embedder = embedder; + this.defaultTemp = 0.7; } #appendContext(contextTexts = []) { @@ -85,7 +86,7 @@ class LMStudioLLM { const textResponse = await this.lmstudio .createChatCompletion({ model: this.model, - temperature: Number(workspace?.openAiTemp ?? 0.7), + temperature: Number(workspace?.openAiTemp ?? this.defaultTemp), n: 1, messages: await this.compressMessages( { @@ -122,7 +123,7 @@ class LMStudioLLM { const streamRequest = await this.lmstudio.createChatCompletion( { model: this.model, - temperature: Number(workspace?.openAiTemp ?? 0.7), + temperature: Number(workspace?.openAiTemp ?? this.defaultTemp), n: 1, stream: true, messages: await this.compressMessages( diff --git a/server/utils/AiProviders/localAi/index.js b/server/utils/AiProviders/localAi/index.js index 6623ac88e..6d265cf82 100644 --- a/server/utils/AiProviders/localAi/index.js +++ b/server/utils/AiProviders/localAi/index.js @@ -27,6 +27,7 @@ class LocalAiLLM { "INVALID LOCAL AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LocalAI as your LLM." ); this.embedder = embedder; + this.defaultTemp = 0.7; } #appendContext(contextTexts = []) { @@ -85,7 +86,7 @@ class LocalAiLLM { const textResponse = await this.openai .createChatCompletion({ model: this.model, - temperature: Number(workspace?.openAiTemp ?? 0.7), + temperature: Number(workspace?.openAiTemp ?? this.defaultTemp), n: 1, messages: await this.compressMessages( { @@ -123,7 +124,7 @@ class LocalAiLLM { { model: this.model, stream: true, - temperature: Number(workspace?.openAiTemp ?? 0.7), + temperature: Number(workspace?.openAiTemp ?? 
this.defaultTemp), n: 1, messages: await this.compressMessages( { diff --git a/server/utils/AiProviders/mistral/index.js b/server/utils/AiProviders/mistral/index.js new file mode 100644 index 000000000..a25185c76 --- /dev/null +++ b/server/utils/AiProviders/mistral/index.js @@ -0,0 +1,184 @@ +const { chatPrompt } = require("../../chats"); + +class MistralLLM { + constructor(embedder = null, modelPreference = null) { + const { Configuration, OpenAIApi } = require("openai"); + if (!process.env.MISTRAL_API_KEY) + throw new Error("No Mistral API key was set."); + + const config = new Configuration({ + basePath: "https://api.mistral.ai/v1", + apiKey: process.env.MISTRAL_API_KEY, + }); + this.openai = new OpenAIApi(config); + this.model = + modelPreference || process.env.MISTRAL_MODEL_PREF || "mistral-tiny"; + this.limits = { + history: this.promptWindowLimit() * 0.15, + system: this.promptWindowLimit() * 0.15, + user: this.promptWindowLimit() * 0.7, + }; + + if (!embedder) + console.warn( + "No embedding provider defined for MistralLLM - falling back to OpenAiEmbedder for embedding!" + ); + this.embedder = embedder; + this.defaultTemp = 0.0; + } + + #appendContext(contextTexts = []) { + if (!contextTexts || !contextTexts.length) return ""; + return ( + "\nContext:\n" + + contextTexts + .map((text, i) => { + return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`; + }) + .join("") + ); + } + + streamingEnabled() { + return "streamChat" in this && "streamGetChatCompletion" in this; + } + + promptWindowLimit() { + return 32000; + } + + async isValidChatCompletionModel(modelName = "") { + return true; + } + + constructPrompt({ + systemPrompt = "", + contextTexts = [], + chatHistory = [], + userPrompt = "", + }) { + const prompt = { + role: "system", + content: `${systemPrompt}${this.#appendContext(contextTexts)}`, + }; + return [prompt, ...chatHistory, { role: "user", content: userPrompt }]; + } + + async isSafe(_ = "") { + return { safe: true, reasons: [] }; + } + + async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) { + if (!(await this.isValidChatCompletionModel(this.model))) + throw new Error( + `Mistral chat: ${this.model} is not valid for chat completion!` + ); + + const textResponse = await this.openai + .createChatCompletion({ + model: this.model, + temperature: Number(workspace?.openAiTemp ?? this.defaultTemp), + messages: await this.compressMessages( + { + systemPrompt: chatPrompt(workspace), + userPrompt: prompt, + chatHistory, + }, + rawHistory + ), + }) + .then((json) => { + const res = json.data; + if (!res.hasOwnProperty("choices")) + throw new Error("Mistral chat: No results!"); + if (res.choices.length === 0) + throw new Error("Mistral chat: No results length!"); + return res.choices[0].message.content; + }) + .catch((error) => { + throw new Error( + `Mistral::createChatCompletion failed with: ${error.message}` + ); + }); + + return textResponse; + } + + async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) { + if (!(await this.isValidChatCompletionModel(this.model))) + throw new Error( + `Mistral chat: ${this.model} is not valid for chat completion!` + ); + + const streamRequest = await this.openai.createChatCompletion( + { + model: this.model, + stream: true, + temperature: Number(workspace?.openAiTemp ?? 
this.defaultTemp), + messages: await this.compressMessages( + { + systemPrompt: chatPrompt(workspace), + userPrompt: prompt, + chatHistory, + }, + rawHistory + ), + }, + { responseType: "stream" } + ); + + return streamRequest; + } + + async getChatCompletion(messages = null, { temperature = 0.7 }) { + if (!(await this.isValidChatCompletionModel(this.model))) + throw new Error( + `Mistral chat: ${this.model} is not valid for chat completion!` + ); + + const { data } = await this.openai.createChatCompletion({ + model: this.model, + messages, + temperature, + }); + + if (!data.hasOwnProperty("choices")) return null; + return data.choices[0].message.content; + } + + async streamGetChatCompletion(messages = null, { temperature = 0.7 }) { + if (!(await this.isValidChatCompletionModel(this.model))) + throw new Error( + `Mistral chat: ${this.model} is not valid for chat completion!` + ); + + const streamRequest = await this.openai.createChatCompletion( + { + model: this.model, + stream: true, + messages, + temperature, + }, + { responseType: "stream" } + ); + return streamRequest; + } + + // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations + async embedTextInput(textInput) { + return await this.embedder.embedTextInput(textInput); + } + async embedChunks(textChunks = []) { + return await this.embedder.embedChunks(textChunks); + } + + async compressMessages(promptArgs = {}, rawHistory = []) { + const { messageArrayCompressor } = require("../../helpers/chat"); + const messageArray = this.constructPrompt(promptArgs); + return await messageArrayCompressor(this, messageArray, rawHistory); + } +} + +module.exports = { + MistralLLM, +}; diff --git a/server/utils/AiProviders/native/index.js b/server/utils/AiProviders/native/index.js index 66cc84d0c..fff904c46 100644 --- a/server/utils/AiProviders/native/index.js +++ b/server/utils/AiProviders/native/index.js @@ -29,6 +29,7 @@ class NativeLLM { // Make directory when it does not exist in existing installations if (!fs.existsSync(this.cacheDir)) fs.mkdirSync(this.cacheDir); + this.defaultTemp = 0.7; } async #initializeLlamaModel(temperature = 0.7) { @@ -132,7 +133,7 @@ class NativeLLM { ); const model = await this.#llamaClient({ - temperature: Number(workspace?.openAiTemp ?? 0.7), + temperature: Number(workspace?.openAiTemp ?? this.defaultTemp), }); const response = await model.call(messages); return response.content; @@ -145,7 +146,7 @@ class NativeLLM { async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) { const model = await this.#llamaClient({ - temperature: Number(workspace?.openAiTemp ?? 0.7), + temperature: Number(workspace?.openAiTemp ?? this.defaultTemp), }); const messages = await this.compressMessages( { diff --git a/server/utils/AiProviders/ollama/index.js b/server/utils/AiProviders/ollama/index.js index fce96f369..af7fe8210 100644 --- a/server/utils/AiProviders/ollama/index.js +++ b/server/utils/AiProviders/ollama/index.js @@ -20,6 +20,7 @@ class OllamaAILLM { "INVALID OLLAMA SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Ollama as your LLM." ); this.embedder = embedder; + this.defaultTemp = 0.7; } #ollamaClient({ temperature = 0.07 }) { @@ -113,7 +114,7 @@ class OllamaAILLM { ); const model = this.#ollamaClient({ - temperature: Number(workspace?.openAiTemp ?? 0.7), + temperature: Number(workspace?.openAiTemp ?? 
this.defaultTemp), }); const textResponse = await model .pipe(new StringOutputParser()) @@ -136,7 +137,7 @@ class OllamaAILLM { ); const model = this.#ollamaClient({ - temperature: Number(workspace?.openAiTemp ?? 0.7), + temperature: Number(workspace?.openAiTemp ?? this.defaultTemp), }); const stream = await model .pipe(new StringOutputParser()) diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js index 038d201d1..582bc054d 100644 --- a/server/utils/AiProviders/openAi/index.js +++ b/server/utils/AiProviders/openAi/index.js @@ -23,6 +23,7 @@ class OpenAiLLM { "No embedding provider defined for OpenAiLLM - falling back to OpenAiEmbedder for embedding!" ); this.embedder = !embedder ? new OpenAiEmbedder() : embedder; + this.defaultTemp = 0.7; } #appendContext(contextTexts = []) { @@ -127,7 +128,7 @@ class OpenAiLLM { const textResponse = await this.openai .createChatCompletion({ model: this.model, - temperature: Number(workspace?.openAiTemp ?? 0.7), + temperature: Number(workspace?.openAiTemp ?? this.defaultTemp), n: 1, messages: await this.compressMessages( { @@ -165,7 +166,7 @@ class OpenAiLLM { { model: this.model, stream: true, - temperature: Number(workspace?.openAiTemp ?? 0.7), + temperature: Number(workspace?.openAiTemp ?? this.defaultTemp), n: 1, messages: await this.compressMessages( { diff --git a/server/utils/AiProviders/togetherAi/index.js b/server/utils/AiProviders/togetherAi/index.js index 44061dd0a..341661f8d 100644 --- a/server/utils/AiProviders/togetherAi/index.js +++ b/server/utils/AiProviders/togetherAi/index.js @@ -28,6 +28,7 @@ class TogetherAiLLM { "INVALID TOGETHER AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Together AI as your LLM." ); this.embedder = embedder; + this.defaultTemp = 0.7; } #appendContext(contextTexts = []) { @@ -89,7 +90,7 @@ class TogetherAiLLM { const textResponse = await this.openai .createChatCompletion({ model: this.model, - temperature: Number(workspace?.openAiTemp ?? 0.7), + temperature: Number(workspace?.openAiTemp ?? this.defaultTemp), n: 1, messages: await this.compressMessages( { @@ -127,7 +128,7 @@ class TogetherAiLLM { { model: this.model, stream: true, - temperature: Number(workspace?.openAiTemp ?? 0.7), + temperature: Number(workspace?.openAiTemp ?? this.defaultTemp), n: 1, messages: await this.compressMessages( { diff --git a/server/utils/chats/index.js b/server/utils/chats/index.js index d63de47d5..764c7795a 100644 --- a/server/utils/chats/index.js +++ b/server/utils/chats/index.js @@ -171,7 +171,7 @@ async function chatWithWorkspace( // Send the text completion. const textResponse = await LLMConnector.getChatCompletion(messages, { - temperature: workspace?.openAiTemp ?? 0.7, + temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp, }); if (!textResponse) { diff --git a/server/utils/chats/stream.js b/server/utils/chats/stream.js index ceea8d7d2..cff565ed6 100644 --- a/server/utils/chats/stream.js +++ b/server/utils/chats/stream.js @@ -141,7 +141,7 @@ async function streamChatWithWorkspace( `\x1b[31m[STREAMING DISABLED]\x1b[0m Streaming is not available for ${LLMConnector.constructor.name}. Will use regular chat method.` ); completeText = await LLMConnector.getChatCompletion(messages, { - temperature: workspace?.openAiTemp ?? 0.7, + temperature: workspace?.openAiTemp ?? 
LLMConnector.defaultTemp, }); writeResponseChunk(response, { uuid, @@ -153,7 +153,7 @@ async function streamChatWithWorkspace( }); } else { const stream = await LLMConnector.streamGetChatCompletion(messages, { - temperature: workspace?.openAiTemp ?? 0.7, + temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp, }); completeText = await handleStreamResponses(response, stream, { uuid, diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js index 87fe976ec..53c641e75 100644 --- a/server/utils/helpers/customModels.js +++ b/server/utils/helpers/customModels.js @@ -5,6 +5,7 @@ const SUPPORT_CUSTOM_MODELS = [ "ollama", "native-llm", "togetherai", + "mistral", ]; async function getCustomModels(provider = "", apiKey = null, basePath = null) { @@ -20,6 +21,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) { return await ollamaAIModels(basePath); case "togetherai": return await getTogetherAiModels(); + case "mistral": + return await getMistralModels(apiKey); case "native-llm": return nativeLLMModels(); default: @@ -117,6 +120,26 @@ async function getTogetherAiModels() { return { models, error: null }; } +async function getMistralModels(apiKey = null) { + const { Configuration, OpenAIApi } = require("openai"); + const config = new Configuration({ + apiKey: apiKey || process.env.MISTRAL_API_KEY, + basePath: "https://api.mistral.ai/v1", + }); + const openai = new OpenAIApi(config); + const models = await openai + .listModels() + .then((res) => res.data.data.filter((model) => !model.id.includes("embed"))) + .catch((e) => { + console.error(`Mistral:listModels`, e.message); + return []; + }); + + // Api Key was successful so lets save it for future uses + if (models.length > 0 && !!apiKey) process.env.MISTRAL_API_KEY = apiKey; + return { models, error: null }; +} + function nativeLLMModels() { const fs = require("fs"); const path = require("path"); diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js index 2b1f3dacf..2eed9057c 100644 --- a/server/utils/helpers/index.js +++ b/server/utils/helpers/index.js @@ -52,6 +52,9 @@ function getLLMProvider(modelPreference = null) { case "togetherai": const { TogetherAiLLM } = require("../AiProviders/togetherAi"); return new TogetherAiLLM(embedder, modelPreference); + case "mistral": + const { MistralLLM } = require("../AiProviders/mistral"); + return new MistralLLM(embedder, modelPreference); case "native": const { NativeLLM } = require("../AiProviders/native"); return new NativeLLM(embedder, modelPreference); @@ -76,6 +79,7 @@ function getEmbeddingEngineSelection() { return new LocalAiEmbedder(); case "native": const { NativeEmbedder } = require("../EmbeddingEngines/native"); + console.log("\x1b[34m[INFO]\x1b[0m Using Native Embedder"); return new NativeEmbedder(); default: return null; diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js index 5c43da519..54e684029 100644 --- a/server/utils/helpers/updateENV.js +++ b/server/utils/helpers/updateENV.js @@ -95,6 +95,15 @@ const KEY_MAPPING = { checks: [nonZero], }, + MistralApiKey: { + envKey: "MISTRAL_API_KEY", + checks: [isNotEmpty], + }, + MistralModelPref: { + envKey: "MISTRAL_MODEL_PREF", + checks: [isNotEmpty], + }, + // Native LLM Settings NativeLLMModelPref: { envKey: "NATIVE_LLM_MODEL_PREF", @@ -268,6 +277,7 @@ function supportedLLM(input = "") { "ollama", "native", "togetherai", + "mistral", ].includes(input); return validSelection ? 
null : `${input} is not a valid LLM provider.`; } From 56dc49966d663b6d17c71e7e531e808bac415ca2 Mon Sep 17 00:00:00 2001 From: Sean Hatfield Date: Wed, 17 Jan 2024 16:22:06 -0800 Subject: [PATCH 39/41] add copy feature to assistant chat message (#611) * add copy feature to assistant chat message * fix tooltip not hiding on mobile * fix: add tooltips chore: breakout actions to extendable component + memoize add CopyText to hook we can reuse fix: Copy on code snippets broken, moved to event listener fix: highlightjs patch for new API support feat: add copy response support --------- Co-authored-by: timothycarambat --- frontend/package.json | 1 + .../HistoricalMessage/Actions/index.jsx | 43 ++++++++++++++++ .../ChatHistory/HistoricalMessage/index.jsx | 11 ++++- .../ChatContainer/ChatHistory/index.jsx | 8 +-- .../src/components/WorkspaceChat/index.jsx | 36 ++++++++++++++ frontend/src/hooks/useCopyText.js | 15 ++++++ frontend/src/index.css | 4 ++ frontend/src/utils/chat/markdown.js | 49 +++++++++---------- frontend/yarn.lock | 33 +++++++++++++ 9 files changed, 169 insertions(+), 31 deletions(-) create mode 100644 frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/HistoricalMessage/Actions/index.jsx create mode 100644 frontend/src/hooks/useCopyText.js diff --git a/frontend/package.json b/frontend/package.json index 86e552ab7..17d9af913 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -31,6 +31,7 @@ "react-router-dom": "^6.3.0", "react-tag-input-component": "^2.0.2", "react-toastify": "^9.1.3", + "react-tooltip": "^5.25.2", "text-case": "^1.0.9", "truncate": "^3.0.0", "uuid": "^9.0.0" diff --git a/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/HistoricalMessage/Actions/index.jsx b/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/HistoricalMessage/Actions/index.jsx new file mode 100644 index 000000000..12fa7dc73 --- /dev/null +++ b/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/HistoricalMessage/Actions/index.jsx @@ -0,0 +1,43 @@ +import useCopyText from "@/hooks/useCopyText"; +import { Check, ClipboardText } from "@phosphor-icons/react"; +import { memo } from "react"; +import { Tooltip } from "react-tooltip"; + +const Actions = ({ message }) => { + return ( +

+ + {/* Other actions to go here later. */} +
+ ); +}; + +function CopyMessage({ message }) { + const { copied, copyText } = useCopyText(); + return ( + <> +
+ +
+ + + ); +} + +export default memo(Actions); diff --git a/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/HistoricalMessage/index.jsx b/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/HistoricalMessage/index.jsx index 4637b1cd7..c39220f37 100644 --- a/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/HistoricalMessage/index.jsx +++ b/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/HistoricalMessage/index.jsx @@ -1,14 +1,15 @@ -import { memo, forwardRef } from "react"; +import React, { memo, forwardRef } from "react"; import { Warning } from "@phosphor-icons/react"; import Jazzicon from "../../../../UserIcon"; +import Actions from "./Actions"; import renderMarkdown from "@/utils/chat/markdown"; import { userFromStorage } from "@/utils/request"; import Citations from "../Citation"; import { AI_BACKGROUND_COLOR, USER_BACKGROUND_COLOR } from "@/utils/constants"; import { v4 } from "uuid"; import createDOMPurify from "dompurify"; -const DOMPurify = createDOMPurify(window); +const DOMPurify = createDOMPurify(window); const HistoricalMessage = forwardRef( ( { uuid = v4(), message, role, workspace, sources = [], error = false }, @@ -53,6 +54,12 @@ const HistoricalMessage = forwardRef( /> )}
+ {role === "assistant" && ( +
+
+ +
+ )} {role === "assistant" && }
diff --git a/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/index.jsx b/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/index.jsx index 4a7cd4827..358e520a1 100644 --- a/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/index.jsx +++ b/frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/index.jsx @@ -17,9 +17,12 @@ export default function ChatHistory({ history = [], workspace }) { }, [history]); const handleScroll = () => { - const isBottom = - chatHistoryRef.current.scrollHeight - chatHistoryRef.current.scrollTop === + const diff = + chatHistoryRef.current.scrollHeight - + chatHistoryRef.current.scrollTop - chatHistoryRef.current.clientHeight; + // Fuzzy margin for what qualifies as "bottom". Stronger than straight comparison since that may change over time. + const isBottom = diff <= 10; setIsAtBottom(isBottom); }; @@ -112,7 +115,6 @@ export default function ChatHistory({ history = [], workspace }) { /> ); })} - {showing && ( )} diff --git a/frontend/src/components/WorkspaceChat/index.jsx b/frontend/src/components/WorkspaceChat/index.jsx index 3e129c2a6..30bd494f3 100644 --- a/frontend/src/components/WorkspaceChat/index.jsx +++ b/frontend/src/components/WorkspaceChat/index.jsx @@ -59,5 +59,41 @@ export default function WorkspaceChat({ loading, workspace }) { ); } + setEventDelegatorForCodeSnippets(); return ; } + +// Enables us to safely markdown and sanitize all responses without risk of injection +// but still be able to attach a handler to copy code snippets on all elements +// that are code snippets. +function copyCodeSnippet(uuid) { + const target = document.querySelector(`[data-code="${uuid}"]`); + if (!target) return false; + const markdown = + target.parentElement?.parentElement?.querySelector( + "pre:first-of-type" + )?.innerText; + if (!markdown) return false; + + window.navigator.clipboard.writeText(markdown); + target.classList.add("text-green-500"); + const originalText = target.innerHTML; + target.innerText = "Copied!"; + target.setAttribute("disabled", true); + + setTimeout(() => { + target.classList.remove("text-green-500"); + target.innerHTML = originalText; + target.removeAttribute("disabled"); + }, 2500); +} + +// Listens and hunts for all data-code-snippet clicks. 
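// A single document-level listener covers every snippet: the click is resolved to the
// nearest ancestor carrying data-code-snippet (markup assumed to be emitted by the
// highlight template in utils/chat/markdown.js below), its data-code uuid is read, and
// copyCodeSnippet() above performs the clipboard write; clicks outside a snippet return early.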
+function setEventDelegatorForCodeSnippets() { + document?.addEventListener("click", function (e) { + const target = e.target.closest("[data-code-snippet]"); + const uuidCode = target?.dataset?.code; + if (!uuidCode) return false; + copyCodeSnippet(uuidCode); + }); +} diff --git a/frontend/src/hooks/useCopyText.js b/frontend/src/hooks/useCopyText.js new file mode 100644 index 000000000..04519b2ef --- /dev/null +++ b/frontend/src/hooks/useCopyText.js @@ -0,0 +1,15 @@ +import { useState } from "react"; + +export default function useCopyText(delay = 2500) { + const [copied, setCopied] = useState(false); + const copyText = async (content) => { + if (!content) return; + navigator?.clipboard?.writeText(content); + setCopied(content); + setTimeout(() => { + setCopied(false); + }, delay); + }; + + return { copyText, copied }; +} diff --git a/frontend/src/index.css b/frontend/src/index.css index 1d1b2da85..e8d7e2d8c 100644 --- a/frontend/src/index.css +++ b/frontend/src/index.css @@ -399,3 +399,7 @@ dialog::backdrop { .rti--container { @apply !bg-zinc-900 !text-white !placeholder-white !placeholder-opacity-60 !text-sm !rounded-lg !p-2.5; } + +.tooltip { + @apply !bg-black !text-white !py-2 !px-3 !rounded-md; +} diff --git a/frontend/src/utils/chat/markdown.js b/frontend/src/utils/chat/markdown.js index 53b6804fe..ff4af77bc 100644 --- a/frontend/src/utils/chat/markdown.js +++ b/frontend/src/utils/chat/markdown.js @@ -7,47 +7,44 @@ import { v4 } from "uuid"; const markdown = markdownIt({ html: true, typographer: true, - highlight: function (str, lang) { + highlight: function (code, lang) { const uuid = v4(); if (lang && hljs.getLanguage(lang)) { try { return ( - `
` +
-          hljs.highlight(lang, str, true).value +
+          `
+
+
+ ${lang || ""} +
+ +
+
` +
+          hljs.highlight(code, { language: lang, ignoreIllegals: true }).value +
           "
" ); } catch (__) {} } return ( - `
` +
-      HTMLEncode(str) +
+      `
+
+
+ +
+
` +
+      HTMLEncode(code) +
       "
" ); }, }); -window.copySnippet = function (uuid = "") { - const target = document.getElementById(`code-${uuid}`); - const markdown = - target.parentElement?.parentElement?.querySelector( - "pre:first-of-type" - )?.innerText; - if (!markdown) return false; - - window.navigator.clipboard.writeText(markdown); - target.classList.add("text-green-500"); - const originalText = target.innerHTML; - target.innerText = "Copied!"; - target.setAttribute("disabled", true); - - setTimeout(() => { - target.classList.remove("text-green-500"); - target.innerHTML = originalText; - target.removeAttribute("disabled"); - }, 5000); -}; - export default function renderMarkdown(text = "") { return markdown.render(text); } diff --git a/frontend/yarn.lock b/frontend/yarn.lock index c9181f15f..fa1e71331 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -365,6 +365,26 @@ resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.53.0.tgz#bea56f2ed2b5baea164348ff4d5a879f6f81f20d" integrity sha512-Kn7K8dx/5U6+cT1yEhpX1w4PCSg0M+XyRILPgvwcEBjerFWCwQj5sbr3/VmxqV0JGHCBCzyd6LxypEuehypY1w== +"@floating-ui/core@^1.5.3": + version "1.5.3" + resolved "https://registry.yarnpkg.com/@floating-ui/core/-/core-1.5.3.tgz#b6aa0827708d70971c8679a16cf680a515b8a52a" + integrity sha512-O0WKDOo0yhJuugCx6trZQj5jVJ9yR0ystG2JaNAemYUWce+pmM6WUEFIibnWyEJKdrDxhm75NoSRME35FNaM/Q== + dependencies: + "@floating-ui/utils" "^0.2.0" + +"@floating-ui/dom@^1.0.0": + version "1.5.4" + resolved "https://registry.yarnpkg.com/@floating-ui/dom/-/dom-1.5.4.tgz#28df1e1cb373884224a463235c218dcbd81a16bb" + integrity sha512-jByEsHIY+eEdCjnTVu+E3ephzTOzkQ8hgUfGwos+bg7NlH33Zc5uO+QHz1mrQUOgIKKDD1RtS201P9NvAfq3XQ== + dependencies: + "@floating-ui/core" "^1.5.3" + "@floating-ui/utils" "^0.2.0" + +"@floating-ui/utils@^0.2.0": + version "0.2.1" + resolved "https://registry.yarnpkg.com/@floating-ui/utils/-/utils-0.2.1.tgz#16308cea045f0fc777b6ff20a9f25474dd8293d2" + integrity sha512-9TANp6GPoMtYzQdt54kfAyMmz1+osLlXdg2ENroU7zzrtflTLrrC/lgrIfaSe+Wu0b89GKccT7vxXA0MoAIO+Q== + "@humanwhocodes/config-array@^0.11.13": version "0.11.13" resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.13.tgz#075dc9684f40a531d9b26b0822153c1e832ee297" @@ -846,6 +866,11 @@ chokidar@^3.5.3: optionalDependencies: fsevents "~2.3.2" +classnames@^2.3.0: + version "2.5.1" + resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.5.1.tgz#ba774c614be0f016da105c858e7159eae8e7687b" + integrity sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow== + cliui@^8.0.1: version "8.0.1" resolved "https://registry.yarnpkg.com/cliui/-/cliui-8.0.1.tgz#0c04b075db02cbfe60dc8e6cf2f5486b1a3608aa" @@ -2543,6 +2568,14 @@ react-toastify@^9.1.3: dependencies: clsx "^1.1.1" +react-tooltip@^5.25.2: + version "5.25.2" + resolved "https://registry.yarnpkg.com/react-tooltip/-/react-tooltip-5.25.2.tgz#efb51845ec2e863045812ad1dc1927573922d629" + integrity sha512-MwZ3S9xcHpojZaKqjr5mTs0yp/YBPpKFcayY7MaaIIBr2QskkeeyelpY2YdGLxIMyEj4sxl0rGoK6dQIKvNLlw== + dependencies: + "@floating-ui/dom" "^1.0.0" + classnames "^2.3.0" + react@^18.2.0: version "18.2.0" resolved "https://registry.yarnpkg.com/react/-/react-18.2.0.tgz#555bd98592883255fa00de14f1151a917b5d77d5" From 3fe7a25759bb2960997de81f9fd433ddf72987b6 Mon Sep 17 00:00:00 2001 From: Sean Hatfield Date: Wed, 17 Jan 2024 16:25:30 -0800 Subject: [PATCH 40/41] add token context limit for native llm settings (#614) Co-authored-by: timothycarambat --- .../LLMSelection/NativeLLMOptions/index.jsx | 70 
++++++++++++------- server/models/systemSettings.js | 1 + server/utils/AiProviders/native/index.js | 2 - server/utils/helpers/updateENV.js | 5 ++ 4 files changed, 50 insertions(+), 28 deletions(-) diff --git a/frontend/src/components/LLMSelection/NativeLLMOptions/index.jsx b/frontend/src/components/LLMSelection/NativeLLMOptions/index.jsx index a41a81fe8..457c09322 100644 --- a/frontend/src/components/LLMSelection/NativeLLMOptions/index.jsx +++ b/frontend/src/components/LLMSelection/NativeLLMOptions/index.jsx @@ -54,31 +54,49 @@ function NativeModelSelection({ settings }) { } return ( -
- - -
+ <> +
+ + +
+
+ + e.target.blur()} + defaultValue={settings?.NativeLLMTokenLimit} + required={true} + autoComplete="off" + /> +
+ ); } diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js index 53d42f2e2..1c4069ac9 100644 --- a/server/models/systemSettings.js +++ b/server/models/systemSettings.js @@ -174,6 +174,7 @@ const SystemSettings = { ...(llmProvider === "native" ? { NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF, + NativeLLMTokenLimit: process.env.NATIVE_LLM_MODEL_TOKEN_LIMIT, // For embedding credentials when ollama is selected. OpenAiKey: !!process.env.OPEN_AI_KEY, diff --git a/server/utils/AiProviders/native/index.js b/server/utils/AiProviders/native/index.js index fff904c46..de1a97f3d 100644 --- a/server/utils/AiProviders/native/index.js +++ b/server/utils/AiProviders/native/index.js @@ -94,8 +94,6 @@ class NativeLLM { } // Ensure the user set a value for the token limit - // and if undefined - assume 4096 window. - // DEV: Currently this ENV is not configurable. promptWindowLimit() { const limit = process.env.NATIVE_LLM_MODEL_TOKEN_LIMIT || 4096; if (!limit || isNaN(Number(limit))) diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js index 54e684029..f44b040b7 100644 --- a/server/utils/helpers/updateENV.js +++ b/server/utils/helpers/updateENV.js @@ -110,6 +110,11 @@ const KEY_MAPPING = { checks: [isDownloadedModel], }, + NativeLLMTokenLimit: { + envKey: "NATIVE_LLM_MODEL_TOKEN_LIMIT", + checks: [nonZero], + }, + EmbeddingEngine: { envKey: "EMBEDDING_ENGINE", checks: [supportedEmbeddingModel], From 0df86699e7b4b8a76c83da796c1295864da583e3 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Wed, 17 Jan 2024 18:00:54 -0800 Subject: [PATCH 41/41] feat: Add support for Zilliz Cloud by Milvus (#615) * feat: Add support for Zilliz Cloud by Milvus * update placeholder text update data handling stmt * update zilliz descriptor --- .vscode/settings.json | 5 +- README.md | 1 + docker/.env.example | 5 + .../ZillizCloudOptions/index.jsx | 38 ++ frontend/src/media/vectordbs/zilliz.png | Bin 0 -> 14336 bytes .../GeneralSettings/VectorDatabase/index.jsx | 11 +- .../Steps/DataHandling/index.jsx | 8 + .../Steps/VectorDatabaseConnection/index.jsx | 10 + server/.env.example | 5 + server/models/systemSettings.js | 6 + server/utils/helpers/index.js | 3 + server/utils/helpers/updateENV.js | 11 + .../utils/vectorDbProviders/zilliz/index.js | 365 ++++++++++++++++++ 13 files changed, 466 insertions(+), 2 deletions(-) create mode 100644 frontend/src/components/VectorDBSelection/ZillizCloudOptions/index.jsx create mode 100644 frontend/src/media/vectordbs/zilliz.png create mode 100644 server/utils/vectorDbProviders/zilliz/index.js diff --git a/.vscode/settings.json b/.vscode/settings.json index 82165a178..ab66c194b 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -2,10 +2,13 @@ "cSpell.words": [ "Dockerized", "Langchain", + "Milvus", "Ollama", "openai", "Qdrant", - "Weaviate" + "vectordbs", + "Weaviate", + "Zilliz" ], "eslint.experimental.useFlatConfig": true } \ No newline at end of file diff --git a/README.md b/README.md index 6e3df0df4..c3eb429c0 100644 --- a/README.md +++ b/README.md @@ -89,6 +89,7 @@ Some cool features of AnythingLLM - [Weaviate](https://weaviate.io) - [QDrant](https://qdrant.tech) - [Milvus](https://milvus.io) +- [Zilliz](https://zilliz.com) ### Technical Overview diff --git a/docker/.env.example b/docker/.env.example index 8d33a809d..f3eba2418 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -99,6 +99,11 @@ GID='1000' # MILVUS_USERNAME= # MILVUS_PASSWORD= +# Enable all below if you are using vector database: Zilliz 
Cloud. +# VECTOR_DB="zilliz" +# ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com" +# ZILLIZ_API_TOKEN=api-token-here + # CLOUD DEPLOYMENT VARIRABLES ONLY # AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting. diff --git a/frontend/src/components/VectorDBSelection/ZillizCloudOptions/index.jsx b/frontend/src/components/VectorDBSelection/ZillizCloudOptions/index.jsx new file mode 100644 index 000000000..5a26b437a --- /dev/null +++ b/frontend/src/components/VectorDBSelection/ZillizCloudOptions/index.jsx @@ -0,0 +1,38 @@ +export default function ZillizCloudOptions({ settings }) { + return ( +
+
+
+ + +
+ +
+ + +
+
+
+ ); +} diff --git a/frontend/src/media/vectordbs/zilliz.png b/frontend/src/media/vectordbs/zilliz.png new file mode 100644 index 0000000000000000000000000000000000000000..e755b0f12085c61990ce8ae45d62fec9fcb3c987 GIT binary patch literal 14336 zcmdtJWmwzI)-Q|{cXudKqku7P49I20>hyf_pn#oJQ6xVt;WC4u4=XmLB? z-tz3dpZ7f<&N*Ms$#vz*WY(HBzh#q|`L9I1(on?1rp87>Lc&v4l6#GWgsk%T#zaT- z_;HYTZjgonI=hpdaUGYIT~}9yO@t8+g}`Fi1uSQFPjwh)4#tY+-z*c zUdt)`ofYv-lI@L$2SkjQ*W25h$6J8M#m$bFUsP0-mk+=T0B|EXxZQoh9+p1bV0UJO zq<DXlh|1Ef}B8)hH>ZR z=i&c1ls4949`+tip#K0a63G@AztiKKUZ;}+O5Q2mNLI7bz)C=;9hzbGu z{yG0&#Q!!#$Hmk7&7)dUfPZrSSKoiJ|Npr8-_7+8RsY?r|A!n6`Ipps^zvUh`{-W; z0pVYPfAcS*PfW(sLPxfJscZSZ-yU4W#(=@Xp3j5bOq*_Ep)~G~1#6su=tdn0RDb;7#Ptt9zlm*nS zt%y5UQ}dbf1&gTUWb9Qai!Ja>NC4FMN*#?Cg%RZzKzTpGt0i ziqh57(_;!F>`Kaa!gv}+sHK=s`Vtd^MTId8^5XyYrb;#4y8&}@%`p3*V3l_nCk9kO zqo@2{m&d9>7>AmeYU4yCkQeHoP$CKaU= zrdQc4nJs-&2egd&?;c0&f3G(*eDCzg_T@(tjbhCzQ$GU1=M3ekL!Tl-Eg6I;*cN%O z1C^Z`RndET;v&oxk94$Wg&cCE2tH_`A58&3m5A&SN~(eCl)d2x7LNR|zl;6{U~shp zLa_d$Y6hckm!hZS3SZjLM7@3zon`2P^;(V&(Nqx|4nar}dX!Y>rx9Y?DnIsAEBZy% z$1|@HLD~1}nDvkNZ0+z!H|Nw*azwo9VOW;pb8YNnlb=asXC4Zcgs*PEVZ)HhWgi}0mg8FtF@kqfiwo~3GKW--4GY26q*`*; zDMU6`#_%3p`2Ft)meRQ0-dO7ky^IGNJ=t2eqNN<__Cp`gd=<83C}0S)%9m{9mec<} zF#tX7b~%Is)<85-2^#2ZdYY(p-596!p8azy0EKoQ_4pR`Q!Bl&zfGdO;qASeuc+FC z;w67GlP!I_uD7_*G%3?__JR>WE_ay7acD)oLF{y6gZ_2$Lgf)E@HC7N<^27A@8-uD zA|eGUP#Q-WPIG-m>~!xpZ(VMIQOcGxLLFLM=|J()Mp#6BVQO6y~iX5$+4FshnCl;W6XQ&at4QDSgM4j;TVDv z3nh$lm>)xrxku~n^9t5#)7P)!M>;5+YmsEXyAq_y`sYQL$9k>?k4c!GBfwY>5CH#} z_m2B)rbH5~F@F*|Xjb{8N<>2@9i&77?^TEG6?RoIhV&DTwL@)-6!$_BI9xjQB_` z&yW7vemf$x{3^UsK7Rxm-3;H8x{Lp;dn>H@eQA%R5K|zIW@}Be$XRK&nWvS(^iFFGRSJQ$Hh25TeMO^5oyF9%di;Xh3 z0E%|@%(0`b>Ef=-bj#(SJY5=KqsSjoye2PIX)t4wmO`erpacFL@2T)AhAv2#Dl})w za%j4<@9oaDu%Cul@3EXb+Q>)W%Ue7ei(5{}HXYQLk~o-8{Mr}w z@+D%lieu-*amFz3$&Rf_f4R-lCfvDN8n+*cCscxSsu^h2X2zhyadz<*uR*@#*fhu^wh z7|xXA1>9UZ=dnLw8KSkj^!}`H_-)0hk`QC|G~*A;!D{B!rNpo}hnIMrOC*6*&1@_ajR5`5|gIKF4=5IMSDvO}2=|nL{^bHyRd7cC`QP zoInY8jPh4=RQ`((?7!d16nu7d-v-y|dW*P@WaHAz+6W6Px-P}(FV4GNI*u_0uv?&hI z0xfA=o)th~P&w&%&gK zjQRRBIpHggf5y$gfHL&ms8HmiZeYF|T;bc=X@ zfXEWphQyWJoB0mAW~RQ6agdc`*vFd9a_mRl(TFIcj@#YZmR?Y;DH{l4-f zp*=Mf?v5P=AmHGMh^cVpC8#8y-H=MhDayLaTxkP3r#18V)zJhU2HVvzsLXGdthP8F(U$zHzr{(x0=gVK*?BBsJl=DQMl^|j{ zL$y5Ez}550xApis)3q`v_hMgPj}1vpY}$FGLeH~R*I^V{&a`fb$4LFeicA(=2Z zIQJHh=YfpNFw_yN=`dPaV0b42#~lHz7(@dM2y1*_=_5!oJ6pzgA1;Uj5jC_HnoJp4 z5?JwG7#(w5#eGYktUyye(uNAJr+{V~!)hA~pUH?Q%0H91eVf5O%#9Fa_Y2gg*099G zn_=geY!*CfZ$k@iUa*}~t-Kd7sG15ewRHx%zq^>P8Fh4zbsdN_6AsJd;4tzWX1gVr zb-}L)Xbh9f+@AT0vz_3zHd;3{J2V7~QfxI{dhX{b*+&^PK0`(B+3+35V#mUz$}TxU zzkA1=%Jh)8KC$ky#cP=JIV|z3Wi6oo13S(;8QDB%i~ zu_q1dSml$4#Z9*(M=)XWChuUcvew=lJmKSa&G%>&YiP$69l1}YfeL5AG~-w$WsMPX zhDfAjottw;c9s^5JS2%y*YB$&dbzzkGtHzyEpC(j@J-w82DK$Z>ErM3D{{CvBOf#X zO$)qZz}p0aR~w06702V@2b$2f;XL1N$YQo*ij6oD5&lYic=Ao>?Lyz0t()kdH%TYY zh8$-*W;h_{eA|w!!{g^~EySd-;ah)FfD*I1CQt0nz2k+9M-1L9yES+*M{de@Y+kO7 z(sXK$h5kOZaJHXNN zR*^IQqH-y=m)+N6t`Va{c#v^xK0p4SH4+kOl5Hincny0V4d+f@ayIr=x?h?eqvYGW z1MO16D5ttr9IS5S4TIfQ7l^VBSFdUr2eiBmlmO6^fxK2| zZ8LO>D5;PlMT?xWfG2{=rMs@|s9LGi-OLX!0zX_>_(rUWD)NV4w$##QBRQ3QtJs7z z)5=gN8T>%!aw(=VfFcrvk>}CaTpOGlfclmpwNrtNaEKiz_G2wHOTS^9DcFI8E0yww z{(f6}Y)&A#Fzb6Q0KK`b?hC5-3BfK-k&sCsA2Q(Bx&f}y;K${(rf!i8uvJV(hnEgR zi&nnEHDI82QjgTqRdpFie#e}V7ydzEjD6FnzYeCo-V{P@u{lEA{V$iT);_? 
zbPmKTZTik)c=%D6Kk>Ex`pzWUV(sZkrw_E$8PYEeoqC8-1tcf)5GV)q>TtxT=NHGq-+yg>i%;Y)T1+qC#?N&$fN4bB-?Q6q+pe67- z-)N0F6iAjR!ijAhFgRxpQKSL|YZs7~w+yiw7c{*X1&oHK!e0%!F_w&sSGUbv6q~KF zG^^7->xJk^n=?c)yNpG;yM$wWQctp>Zayn?|KW@s)JomA!lE1z1yGS$YlDpN!zYP1 zTzEH92gkY1Pz3C{JK7@my|mN!)uy*v-QW4Om4Nq;MhOpZh*|)y6|vnC&*cpc zd_-sC_n^*2qsqco?eCk81jUyEhX=_*GiUgTnrR)X3Eh2o3-N_wbmCnLc2~I;R}KOa z(w%wukxk!P=8Cg&jP&o`PCWFe1u*rkC?>SycY}jViZhe|vdI9weykj5qcLoC_}QRS zK`cNH@#`D278x}z+h0E6g87FE^Ngvwd30yk;+kuJZA|0#a7HL_?-`|7&z67g%E9w9 z)*SaO`^;vL>$z2fU3S%Q!q)-J^5qtyWif08gM#!A{9mx;D z8=4V;p?Uf)?1SsAgLpJup!BZx{-%9feP1==+0{Gvct5P+h&A7X()y2pnJ*Z{F=fHwI8%Zg6-q_xlks?=p~2S> z&P=c(jre``?D$IgdM31dt-}xJa0A;Y(5&fzzR-p!i!bmQj<8{nx_{U+-f zY%38m`L)l5I_eFxRz!#MTvxg|6zqnwuU1otp6!o~ON-m85w?!xVrbls+!lmGE3xC; zGBr+Vohl|VuH)++~P5BYp?>RazbwNA%-Vh2pe{7uh4#Lx4 z#T54fDFtR8RQK<5azxlagrVbmZ?s7D&&zplq+OCAs}E(Qir0pT^G==HHO9(OJtZT9 zOnY10cx||p@;3i;)TMVGQ%$j!pD4hra}>+hu%l z!S5K;^byp`%!i;&_%9mxgleUu=Ivl5tE1IKNomz`iKC+&ecoU;($$XaHgEH80nU{# z6~%HBomqwjE!`4LmqY-Pplf48qdvh-5cbI1ai;hg#dO!J!)k(kW8EYH3Tp44-|w03 zH?~mfK_bSqL|kg!5{JKN1T_R z6u1BYbx$|9HyMK2jw3#qTKwFG90Psqv|e{meSGiweC{jS99}qQ?VX`n!NU*a6W9&% z0B@-2d9;epF>1yG@=Q4zsz!JXhD^oA$kKE*l?edllTUBf?X5@6;UdIFb!VRX$Pf2Z zw$(DW%5AGl2>;<^*Tm4|IjeL$`Tq=hl$)9e!cB(}jvF-5q3K!a( ze+nNiV5F}+$*OI=d&H#<$?&b;ru;{Og?UKnX$zz zZiXn`Y+32^aK2yZ;Ph5MJD}gw?4JzO!hB{y0b;jXGTw%z`d6Yk)_N2t+uf82POF$; zNyI`!#zYJn_-^Km>As~lZ(H3qr76;EL^qf~FaPn@NSXi7E}AUH%ZFsxb#8Z3>a6G0 zpdk0shbfKqW63|1P@i9UhvN|rH#IL2_Z`G!WN;V;UjX_@4#k7>;iY4y5qS!5JfZSi z4^ik_&(G?r22DeLrn@#Diw$kwuN+vi+205-YAJ-Y`dJ>C`1ql78PS?QjnljWRU@`e zpK`%jGD9Ob|_H*H>U-ZX*=pEljOG|v2l;aR^Nxe)}QiitI=ukpoc5yB}R zdG~WaZu>S~|N5x9WM;dFXMG$)lHRZ#e;N34DHrQPP9deHq@!*Wu}{VuIyufb+Yj73 zy)~<;Uic*!v43qNG-6EofF=XrYJbI(%=3#!zGWFOU`Ax_cz!`{4{LX z;>8uhZu~4`dvnDH@71}tUEs^asVN;W7FV?HTI_a5I+<1kP26u2lQw-UizviwYE5W zTX8w+H-PoK;J%9Svhwxeo@w8E$!?X0(w-n(L$Wf^80RQ(+1MCn5Gt+ z$U4u<@0`h}?#75Talgl*d!bf^*+*(5su&f^&KReL;lecf{#IgO_7VH?Z0eXaAJZSb+mfmvvg9cG-24p# zf9)Hjg0~%&M-v^p^`0%aK?k}lqHhE3i3Ce`tBx=UskEZunPLoq4^RB-I7Ybu&#Q4n ziz#0jp=unXY}29MiNA~rtm3aoWXJhHwhwJ{<#SO^YhExLHjMA~sAffv+wT}Cz_X_> zU|=6sMVub2Zg_m)zCP#N>=k^UhWBjLb)FnBvDP~>H4BDoDR#dW@ zAFNdTP;rvy_R_ixb6~e%hzgAyK9klK$G&jaT|(!di{K1daw@WuA|h?A#FM41%$7P3 zd1kQV4=@sQbH|tRi9ALoMZ{G|Y{f;Fw0T6oc_=CdGs0z1p?ePw|2n$9AWTk?B8KFm zr>M^v)=(5YgJC(JE%A9WIam-_)>EnA9n+P*ewdC-$-|baB1QgcG(IO{1-Fyie6B=l zx0rQ@uIBo}K{KTa{J5gT3SJSx&Uq1O)Q{U$f=dY6vH-c15n2!6gZ$eMFAhIe){op> z$qZhK6hQMy2t?{zP;3LRNyxjGaw!))`)US|78k#eXD?wXc!tm`K4n5&dw2WSlHLCy zRrAO2lVPP7xZP~4CP44-PU>xCLd=G9 zsJkQsU_Mv^`-t1|fqMX@2-f`>;6Sp2Q8}WPwR%pAZ_8?DlK#jCw{Kiilugy@3T)nR zsOEI#B(8Qz*~c4vwp2q=y7Zeh?wtnS!4gY|B4UkS&Bkc=qm^14VU&3ZOcvs%?slf= zYHW=wIj4VjZ*4JtZ&ALj3w+iK-}8jnkeF0KP}K~uD|jPwGQqOwdk%&atMr@Y+)M^P zaD^2Os_>8KaDJy#2H81P_x1YLs=%7$l713C-Z-IN3e>o9Plpcig3o$Y?ZWRz5YO5|M$BxMy&4>3{gYVl(Xg$nx89>ehOZ!6 z<~NjV+yl3C{N+PDW5$hSpRSM;*tjWFBIDy=TiS8ftLPb(D8=6+Du!G8f7SZGrBMc` zuouJVD9lx_p7pxYqV0!KLG0mANyd zOtO#_u{>LAMuw+hRQNX1Ak-M& zTq;MFNDq~g+EtEtnTOfF6Hls^Mtauo;f>~E{VKQ`jy%c=M2LgwIm5fEWN4zk#aggn zwNmc-f!~kGQK_NrC!LoU0u;&GS9{v9<73*uRb6uX)5U<}7wzY-s86}vR;VF?Y#Z|p z7_8;tCytgsdG-wyz41j9VECS`vKB)omvFA`&ST12=<%j_zl5=ec?-tX2x{1&*b?EQ(X_ZLSK)1 zD3V73LQ@#RO4i;*eu^~p3s9Rac~)qW-tp(f3gui3|Zj3>)kG6M=P&c5bnV~DZo=SShQz?mGHc59@o;7GN zhHHpEAx#~sIoCl_kIt<8PMLvaC7gJINcCQP_hH278Dzhsa67Y@p6*1`J!DE&oX=+4Nhay)*sqyqb(7R$Uzs;-41W$awBu;bwP9Mta z+BaP1$fRu}YRhMG8#{dMY<9Ep0FA|eW&Ch~lfDx~mY}0U+3t0jUSg?wdqiq(Sg2U_ zXU8H|_8i+a!sGVk#YrRgl5HYu(i32vsEH3(%a570+V*AC1g?-QrfL%krzp@S@C3&+ z%W$rsF`GQqYO>fMzno5nUmtvXjccuLV~^Lcu7>!eOE#y({n%Iv<;)USZm}8s6ePYB 
zR_qDAUWNCIvTpmmw||hiyhFL1e$hOOWBF8TvGN!xHV5;8-X(=`nd@}K400ns!4orV z$m*}8yHGe8pV9xjG*$cZ`~-gc+Ms@VOOj_Od9JIzEiBpCK-e7fMg9+)Q`*Z7l0N6i z4|#*s>D!*p*30Pu-}(>70i&y^@U7QVtFK2->TAk|$V1d#8R1GFVu+}n(`!3ieL{mz zL2mZ37`2KkJ>p!KU?Ct5!vbx@=0UAmE8r5Q)F2plI35BWEmF1){*cKvrO28&^5Gm} zB%ZP1o_+>ef;DQwh`A|yF;FOX2Wp#yl)C!aQqKRiM?Kkd60$>ZNf)uwqWK zpBZBVR;ux=HA1y`W#reR1Ro}^_pK;b>GyVzC@p0=W2#%04REgDF@1{zm28gJbU4;L z;TqhCm{+T^rx-B{XS*uYn4Kv{_8VSlopV&cnrP%V@xqm8oaq;`sysuI_`|;(7}C5S z-Bs`z@#G8x@&Md8qSjn-5&=d27W)w<&J!#n3HbSO1h8As{qPD{+y6X9qLfn&l9Zuo zxaz~y(j?Yqni*qIHl*o~o$e8Vf3{+(PfAWiiG`CZl-uoj(RFswxyOsY7Gp|P`qrgx z_ilmz`|+fgD}!6B804mx4@NPW%2Mdj%pGsQ6eB3QC=@|rlzhJZOD_e_u?w%&WY&&E zQ55pPG|rIBPKf=yV7a!x;Sl~+BsM2-qe;ZyTiH z$TTh;v6oaWx+c8j0nJ*0HflnX>FiCIze-p$kKAKl3wwK;Y5_^r6t23A`POZH4r;qE zY_mX#k&vvs2H;!oaEc)G_9ltzq$iZQ+3LkML0*P7r~IcoJ|HP6k$U4}3X0C0);m*9 z*X0__AUu<^Mr>-v`$UU`DkB4nw2C)VF; zC(E8jbw)YoIm;%`E#n%pq?)p(AZ0INYq)IlNHu5Z9Y&Ps`(5?OF}~uuG)tH#aC8k-Y(JOokHw@BZl2@an0z5gYbk0 zuO8?zr^Qn6-dRjnk%ins9$74KXp`tR%kFOZ|UKc?trkn8k&D9Ft)ASIcXB`AQAxT!bD^4*Rmm2UtbdU${ zJl_KNVv6#4T4z=+R0`q5u^n8zB#~ptjTs4Y$?xBv=jcfukoj{TBy)`kIK7IA-VLZ? ztE4z*b?@YzNKf4n7y0L92}Rtr;f$|hv3X$E2xZz0xyh#Osw7dy?xgAD#XTuaRgV`; zQrVk*XB=PuWxAWv9u0ClVqvM<-^?KxB3KoJ7aUw}Jkfi@z*O_HrOc6T3i>;A@x7US70TV0b7ax-v z%=pr{iSi{UT#_l&Vj7&$if8325r`7hh;PJ@m@^-0HT62zDyDM)th@i?psP8>^&{3Zq}KR% zL}vFiB|V7-=FOH2v8MaXgozZSh3qjg&~i@5-KM<>L*SLO&n^$JC`-4jag-C>yh5I< z9oYxTUZCTq2TVCcY6iM}Khdr?tNHa@T8K6@5)lh`_Zviwsxr~UpS`^)7YLSkHI=bU ze92o*jI9#}@+Izmq`8D>3U6@Uh_4+m5Hc*-v)U}=3!5zcysIU)QcLOdxxIdRYgyE8 zAO}t^F%R%w4*be*Ykyuu1<9^*?WIe9akprQCjen(@#4Qoyo=j$&}tlxo%w!X7Ypdr z&B;sccFWU|(Mtr~o@~ZdDh-EYr-hJ%5Wf^PlR8&Z9!_Wp%eW6sSYirJ2?I~Rd4B%iP`olB{v(prTIMoJxeQS5maO^hpDR zq9q^UCS@7t8rxo%3IMAQbrwk8cPwniiP&XxiWt~jAirrpo5^mMzn^1)cybdb3gL65 z`vz7s$vk~?c73eZJyHCs85r6bHJ}ZrdiG)(J)C1u5(M8;@dx?a{fK;9BuwF&m}rzL zkn##;q7~VMcFmMs$oPA;R zjl7~^-*5#u+}DUlhY#^QQRR@h0Y>^pC+ko1N)8>*I-|IdI6yQr`LhC1Y!vTHZ}BDP zXT#>@qPH|~rgp^PKIK{nhS3T;Lxeh#pfP_EXt2KJ5K9TDxlWux?Y@+X0p!fgew zs;8n{5IAn4(&%F$4O}>CAFs+*@ zL+%!%#OP=;eYFK*Hm-0JH>>m z-k05upkIZ0sR5wk?%ywMRe?>PE}A`G=lc0ks7<@&waDj_cET(r0N<&Ob)v1|F&YNr z{u_MpnSVazy1aAarWjI1)S7aE1%|kWsx?t5S4kZW7bW{Gg9Dow?wdukGvpf(C3--A z%xVCOGV7(GAG`Xsq^oAN+S?-nb7}z?Q#+!bwX%UWK`o_JiM@w!X(sHrUgH6r)3Xte z-C*vgw?&7jmc{23T?8d^Y=m06$#TBY`gp%07S-Qh*V%v5m0J zq3mNh|L(!5n!JsV;vGm7c*_Jt0Hg(@*)wKWvbrZV!ZCNq)_N8k8b8n&j;-R5{4ilh zRC&o)8mxTESPT#%(MW4(&WXcy4Qk#PZx6XMBj1i0VA6_)LK5W{)w6A4NcOsV6@hl~ z8ixkv_ypi#GjX}C$MJ=tv))Q#hE{5h)+5zJ8TQnNmm7NXUp8NgCOqQ%o~3@~+RpCT ziflCVF3UKBOaF8}UNRM~%KdWO_YonFfFd7-rtl-gI;tO>p1H`OzT%%2(2ngJUuhsp zi(j~MQ{kN0Bu`N;uTX1tqv=&V=oo??DIIWtsuOD_J46F@Cmo?qj+aiKMJKY4gTB|| z)7!_PCjiy##eg{MiyvY6JpoYrAIvA8%)y~6Mm#CW2=^FBcxaaO-BpiH)n;*pZ zzJq@%QTdv7^k6h6qG+glsb1*E`A&lzKL#U8(N+wRf#sS9eB}g<@1?H z%@MtScEDkWN&V5<{QCz(>iR<8q~FYcMK@WSXr!y6^e>_3azLJ&6=*tQ5=ne?sg1r~ zO?M+-X7Y$8jm&SWeJ|XvXve~l7&Nle>j!<$|7P1Cd|BVtqueN30*P+(`F}1>7Lc+E zhAWSLE@5>#MMtQw0CFSW)k_;3m(S+h>jhDUGOI!nRTySCz#6?eW|GWemL=r(5^RQV zSyyzvh;^HC*Te#YoNyn+k=80oG7tP8E77flvvQhZ`}WRE;A3Bf;$(k`Jo+X1q`&8P zS|(`fcjE`G=q^NMf3RgOCe0UN)$D89!GxDKRFCE1OI%a0(_YFUA=;7sC(6^{2124o zR@XP#lNxElC}fXyyiBh~Y<8Mxc;w^|!3-%}jo+!F;jxTHl&(g<;7`p%)N{u(BCN}| zI>S)HzTJ zKv;juNFJDd6+g3J8L6-9pu|O^5NKiQ_dMt16e48_BNX0QbNYYbZxR3b=MLxt^6B8- V=Mc1O`p5s)Da&ifmA|wI`5#n5e98a- literal 0 HcmV?d00001 diff --git a/frontend/src/pages/GeneralSettings/VectorDatabase/index.jsx b/frontend/src/pages/GeneralSettings/VectorDatabase/index.jsx index f49054b90..02887b86a 100644 --- a/frontend/src/pages/GeneralSettings/VectorDatabase/index.jsx +++ 
b/frontend/src/pages/GeneralSettings/VectorDatabase/index.jsx @@ -9,6 +9,7 @@ import LanceDbLogo from "@/media/vectordbs/lancedb.png"; import WeaviateLogo from "@/media/vectordbs/weaviate.png"; import QDrantLogo from "@/media/vectordbs/qdrant.png"; import MilvusLogo from "@/media/vectordbs/milvus.png"; +import ZillizLogo from "@/media/vectordbs/zilliz.png"; import PreLoader from "@/components/Preloader"; import ChangeWarningModal from "@/components/ChangeWarning"; import { MagnifyingGlass } from "@phosphor-icons/react"; @@ -19,6 +20,7 @@ import QDrantDBOptions from "@/components/VectorDBSelection/QDrantDBOptions"; import WeaviateDBOptions from "@/components/VectorDBSelection/WeaviateDBOptions"; import VectorDBItem from "@/components/VectorDBSelection/VectorDBItem"; import MilvusDBOptions from "@/components/VectorDBSelection/MilvusDBOptions"; +import ZillizCloudOptions from "@/components/VectorDBSelection/ZillizCloudOptions"; export default function GeneralVectorDatabase() { const [saving, setSaving] = useState(false); @@ -33,7 +35,6 @@ export default function GeneralVectorDatabase() { useEffect(() => { async function fetchKeys() { const _settings = await System.keys(); - console.log(_settings); setSettings(_settings); setSelectedVDB(_settings?.VectorDB || "lancedb"); setHasEmbeddings(_settings?.HasExistingEmbeddings || false); @@ -66,6 +67,14 @@ export default function GeneralVectorDatabase() { options: , description: "100% cloud-based vector database for enterprise use cases.", }, + { + name: "Zilliz Cloud", + value: "zilliz", + logo: ZillizLogo, + options: , + description: + "Cloud hosted vector database built for enterprise with SOC 2 compliance.", + }, { name: "QDrant", value: "qdrant", diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx index 3b0046382..ae5730276 100644 --- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx +++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx @@ -10,6 +10,7 @@ import TogetherAILogo from "@/media/llmprovider/togetherai.png"; import LMStudioLogo from "@/media/llmprovider/lmstudio.png"; import LocalAiLogo from "@/media/llmprovider/localai.png"; import MistralLogo from "@/media/llmprovider/mistral.jpeg"; +import ZillizLogo from "@/media/vectordbs/zilliz.png"; import ChromaLogo from "@/media/vectordbs/chroma.png"; import PineconeLogo from "@/media/vectordbs/pinecone.png"; import LanceDbLogo from "@/media/vectordbs/lancedb.png"; @@ -139,6 +140,13 @@ const VECTOR_DB_PRIVACY = { ], logo: MilvusLogo, }, + zilliz: { + name: "Zilliz Cloud", + description: [ + "Your vectors and document text are stored on your Zilliz cloud cluster.", + ], + logo: ZillizLogo, + }, lancedb: { name: "LanceDB", description: [ diff --git a/frontend/src/pages/OnboardingFlow/Steps/VectorDatabaseConnection/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/VectorDatabaseConnection/index.jsx index 37e0e5b73..af0b5662d 100644 --- a/frontend/src/pages/OnboardingFlow/Steps/VectorDatabaseConnection/index.jsx +++ b/frontend/src/pages/OnboardingFlow/Steps/VectorDatabaseConnection/index.jsx @@ -6,6 +6,7 @@ import LanceDbLogo from "@/media/vectordbs/lancedb.png"; import WeaviateLogo from "@/media/vectordbs/weaviate.png"; import QDrantLogo from "@/media/vectordbs/qdrant.png"; import MilvusLogo from "@/media/vectordbs/milvus.png"; +import ZillizLogo from "@/media/vectordbs/zilliz.png"; import System from "@/models/system"; import paths from "@/utils/paths"; import 
PineconeDBOptions from "@/components/VectorDBSelection/PineconeDBOptions"; @@ -14,6 +15,7 @@ import QDrantDBOptions from "@/components/VectorDBSelection/QDrantDBOptions"; import WeaviateDBOptions from "@/components/VectorDBSelection/WeaviateDBOptions"; import LanceDBOptions from "@/components/VectorDBSelection/LanceDBOptions"; import MilvusOptions from "@/components/VectorDBSelection/MilvusDBOptions"; +import ZillizCloudOptions from "@/components/VectorDBSelection/ZillizCloudOptions"; import showToast from "@/utils/toast"; import { useNavigate } from "react-router-dom"; import VectorDBItem from "@/components/VectorDBSelection/VectorDBItem"; @@ -68,6 +70,14 @@ export default function VectorDatabaseConnection({ options: , description: "100% cloud-based vector database for enterprise use cases.", }, + { + name: "Zilliz Cloud", + value: "zilliz", + logo: ZillizLogo, + options: , + description: + "Cloud hosted vector database built for enterprise with SOC 2 compliance.", + }, { name: "QDrant", value: "qdrant", diff --git a/server/.env.example b/server/.env.example index 26c51927c..23e20bb13 100644 --- a/server/.env.example +++ b/server/.env.example @@ -96,6 +96,11 @@ VECTOR_DB="lancedb" # MILVUS_USERNAME= # MILVUS_PASSWORD= +# Enable all below if you are using vector database: Zilliz Cloud. +# VECTOR_DB="zilliz" +# ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com" +# ZILLIZ_API_TOKEN=api-token-here + # CLOUD DEPLOYMENT VARIRABLES ONLY # AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting. # STORAGE_DIR= # absolute filesystem path with no trailing slash diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js index 1c4069ac9..90de463f0 100644 --- a/server/models/systemSettings.js +++ b/server/models/systemSettings.js @@ -63,6 +63,12 @@ const SystemSettings = { MilvusPassword: !!process.env.MILVUS_PASSWORD, } : {}), + ...(vectorDB === "zilliz" + ? { + ZillizEndpoint: process.env.ZILLIZ_ENDPOINT, + ZillizApiToken: process.env.ZILLIZ_API_TOKEN, + } + : {}), LLMProvider: llmProvider, ...(llmProvider === "openai" ? { diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js index 2eed9057c..b72bb7977 100644 --- a/server/utils/helpers/index.js +++ b/server/utils/helpers/index.js @@ -19,6 +19,9 @@ function getVectorDbClass() { case "milvus": const { Milvus } = require("../vectorDbProviders/milvus"); return Milvus; + case "zilliz": + const { Zilliz } = require("../vectorDbProviders/zilliz"); + return Zilliz; default: throw new Error("ENV: No VECTOR_DB value found in environment!"); } diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js index f44b040b7..9e89047ff 100644 --- a/server/utils/helpers/updateENV.js +++ b/server/utils/helpers/updateENV.js @@ -199,6 +199,16 @@ const KEY_MAPPING = { checks: [isNotEmpty], }, + // Zilliz Cloud Options + ZillizEndpoint: { + envKey: "ZILLIZ_ENDPOINT", + checks: [isValidURL], + }, + ZillizApiToken: { + envKey: "ZILLIZ_API_TOKEN", + checks: [isNotEmpty], + }, + // Together Ai Options TogetherAiApiKey: { envKey: "TOGETHER_AI_API_KEY", @@ -316,6 +326,7 @@ function supportedVectorDB(input = "") { "weaviate", "qdrant", "milvus", + "zilliz", ]; return supported.includes(input) ? 
null diff --git a/server/utils/vectorDbProviders/zilliz/index.js b/server/utils/vectorDbProviders/zilliz/index.js new file mode 100644 index 000000000..b8493e1c2 --- /dev/null +++ b/server/utils/vectorDbProviders/zilliz/index.js @@ -0,0 +1,365 @@ +const { + DataType, + MetricType, + IndexType, + MilvusClient, +} = require("@zilliz/milvus2-sdk-node"); +const { RecursiveCharacterTextSplitter } = require("langchain/text_splitter"); +const { v4: uuidv4 } = require("uuid"); +const { storeVectorResult, cachedVectorInformation } = require("../../files"); +const { + toChunks, + getLLMProvider, + getEmbeddingEngineSelection, +} = require("../../helpers"); + +// Zilliz is basically a copy of Milvus DB class with a different constructor +// to connect to the cloud +const Zilliz = { + name: "Zilliz", + connect: async function () { + if (process.env.VECTOR_DB !== "zilliz") + throw new Error("Zilliz::Invalid ENV settings"); + + const client = new MilvusClient({ + address: process.env.ZILLIZ_ENDPOINT, + token: process.env.ZILLIZ_API_TOKEN, + }); + + const { isHealthy } = await client.checkHealth(); + if (!isHealthy) + throw new Error( + "Zilliz::Invalid Heartbeat received - is the instance online?" + ); + + return { client }; + }, + heartbeat: async function () { + await this.connect(); + return { heartbeat: Number(new Date()) }; + }, + totalVectors: async function () { + const { client } = await this.connect(); + const { collection_names } = await client.listCollections(); + const total = collection_names.reduce(async (acc, collection_name) => { + const statistics = await client.getCollectionStatistics({ + collection_name, + }); + return Number(acc) + Number(statistics?.data?.row_count ?? 0); + }, 0); + return total; + }, + namespaceCount: async function (_namespace = null) { + const { client } = await this.connect(); + const statistics = await client.getCollectionStatistics({ + collection_name: _namespace, + }); + return Number(statistics?.data?.row_count ?? 0); + }, + namespace: async function (client, namespace = null) { + if (!namespace) throw new Error("No namespace value provided."); + const collection = await client + .getCollectionStatistics({ collection_name: namespace }) + .catch(() => null); + return collection; + }, + hasNamespace: async function (namespace = null) { + if (!namespace) return false; + const { client } = await this.connect(); + return await this.namespaceExists(client, namespace); + }, + namespaceExists: async function (client, namespace = null) { + if (!namespace) throw new Error("No namespace value provided."); + const { value } = await client + .hasCollection({ collection_name: namespace }) + .catch((e) => { + console.error("Zilliz::namespaceExists", e.message); + return { value: false }; + }); + return value; + }, + deleteVectorsInNamespace: async function (client, namespace = null) { + await client.dropCollection({ collection_name: namespace }); + return true; + }, + // Zilliz requires a dimension aspect for collection creation + // we pass this in from the first chunk to infer the dimensions like other + // providers do. + getOrCreateCollection: async function (client, namespace, dimensions = null) { + const isExists = await this.namespaceExists(client, namespace); + if (!isExists) { + if (!dimensions) + throw new Error( + `Zilliz:getOrCreateCollection Unable to infer vector dimension from input. 
Open an issue on Github for support.`
+        );
+
+      await client.createCollection({
+        collection_name: namespace,
+        fields: [
+          {
+            name: "id",
+            description: "id",
+            data_type: DataType.VarChar,
+            max_length: 255,
+            is_primary_key: true,
+          },
+          {
+            name: "vector",
+            description: "vector",
+            data_type: DataType.FloatVector,
+            dim: dimensions,
+          },
+          {
+            name: "metadata",
+            description: "metadata",
+            data_type: DataType.JSON,
+          },
+        ],
+      });
+      await client.createIndex({
+        collection_name: namespace,
+        field_name: "vector",
+        index_type: IndexType.AUTOINDEX,
+        metric_type: MetricType.COSINE,
+      });
+      await client.loadCollectionSync({
+        collection_name: namespace,
+      });
+    }
+  },
+  addDocumentToNamespace: async function (
+    namespace,
+    documentData = {},
+    fullFilePath = null
+  ) {
+    const { DocumentVectors } = require("../../../models/vectors");
+    try {
+      let vectorDimension = null;
+      const { pageContent, docId, ...metadata } = documentData;
+      if (!pageContent || pageContent.length == 0) return false;
+
+      console.log("Adding new vectorized document into namespace", namespace);
+      const cacheResult = await cachedVectorInformation(fullFilePath);
+      if (cacheResult.exists) {
+        const { client } = await this.connect();
+        const { chunks } = cacheResult;
+        const documentVectors = [];
+        vectorDimension = chunks[0][0].values.length || null;
+
+        await this.getOrCreateCollection(client, namespace, vectorDimension);
+        for (const chunk of chunks) {
+          // Before sending to Zilliz and saving the records to our db
+          // we need to assign the id of each chunk that is stored in the cached file.
+          const newChunks = chunk.map((chunk) => {
+            const id = uuidv4();
+            documentVectors.push({ docId, vectorId: id });
+            return { id, vector: chunk.values, metadata: chunk.metadata };
+          });
+          const insertResult = await client.insert({
+            collection_name: namespace,
+            data: newChunks,
+          });
+
+          if (insertResult?.status.error_code !== "Success") {
+            throw new Error(
+              `Error embedding into Zilliz! Reason:${insertResult?.status.reason}`
+            );
+          }
+        }
+        await DocumentVectors.bulkInsert(documentVectors);
+        await client.flushSync({ collection_names: [namespace] });
+        return true;
+      }
+
+      const textSplitter = new RecursiveCharacterTextSplitter({
+        chunkSize:
+          getEmbeddingEngineSelection()?.embeddingMaxChunkLength || 1_000,
+        chunkOverlap: 20,
+      });
+      const textChunks = await textSplitter.splitText(pageContent);
+
+      console.log("Chunks created from document:", textChunks.length);
+      const LLMConnector = getLLMProvider();
+      const documentVectors = [];
+      const vectors = [];
+      const vectorValues = await LLMConnector.embedChunks(textChunks);
+
+      if (!!vectorValues && vectorValues.length > 0) {
+        for (const [i, vector] of vectorValues.entries()) {
+          if (!vectorDimension) vectorDimension = vector.length;
+          const vectorRecord = {
+            id: uuidv4(),
+            values: vector,
+            // [DO NOT REMOVE]
+            // LangChain will be unable to find your text if you embed manually and don't include the `text` key.
+            metadata: { ...metadata, text: textChunks[i] },
+          };
+
+          vectors.push(vectorRecord);
+          documentVectors.push({ docId, vectorId: vectorRecord.id });
+        }
+      } else {
+        throw new Error(
+          "Could not embed document chunks! This document will not be recorded."
+        );
+      }
+
+      if (vectors.length > 0) {
+        const chunks = [];
+        const { client } = await this.connect();
+        await this.getOrCreateCollection(client, namespace, vectorDimension);
+
+        console.log("Inserting vectorized chunks into Zilliz.");
+        for (const chunk of toChunks(vectors, 100)) {
+          chunks.push(chunk);
+          const insertResult = await client.insert({
+            collection_name: namespace,
+            data: chunk.map((item) => ({
+              id: item.id,
+              vector: item.values,
+              metadata: item.metadata,
+            })),
+          });
+
+          if (insertResult?.status.error_code !== "Success") {
+            throw new Error(
+              `Error embedding into Zilliz! Reason:${insertResult?.status.reason}`
+            );
+          }
+        }
+        await storeVectorResult(chunks, fullFilePath);
+        await client.flushSync({ collection_names: [namespace] });
+      }
+
+      await DocumentVectors.bulkInsert(documentVectors);
+      return true;
+    } catch (e) {
+      console.error(e);
+      console.error("addDocumentToNamespace", e.message);
+      return false;
+    }
+  },
+  deleteDocumentFromNamespace: async function (namespace, docId) {
+    const { DocumentVectors } = require("../../../models/vectors");
+    const { client } = await this.connect();
+    if (!(await this.namespaceExists(client, namespace))) return;
+    const knownDocuments = await DocumentVectors.where({ docId });
+    if (knownDocuments.length === 0) return;
+
+    const vectorIds = knownDocuments.map((doc) => doc.vectorId);
+    const queryIn = vectorIds.map((v) => `'${v}'`).join(",");
+    await client.deleteEntities({
+      collection_name: namespace,
+      expr: `id in [${queryIn}]`,
+    });
+
+    const indexes = knownDocuments.map((doc) => doc.id);
+    await DocumentVectors.deleteIds(indexes);
+
+    // Even after flushing, Zilliz can take some time to re-calc the count
+    // so all we can hope to do is flushSync so that the count can be correct
+    // on a later call.
+ await client.flushSync({ collection_names: [namespace] }); + return true; + }, + performSimilaritySearch: async function ({ + namespace = null, + input = "", + LLMConnector = null, + similarityThreshold = 0.25, + }) { + if (!namespace || !input || !LLMConnector) + throw new Error("Invalid request to performSimilaritySearch."); + + const { client } = await this.connect(); + if (!(await this.namespaceExists(client, namespace))) { + return { + contextTexts: [], + sources: [], + message: "Invalid query - no documents found for workspace!", + }; + } + + const queryVector = await LLMConnector.embedTextInput(input); + const { contextTexts, sourceDocuments } = await this.similarityResponse( + client, + namespace, + queryVector, + similarityThreshold + ); + + const sources = sourceDocuments.map((metadata, i) => { + return { ...metadata, text: contextTexts[i] }; + }); + return { + contextTexts, + sources: this.curateSources(sources), + message: false, + }; + }, + similarityResponse: async function ( + client, + namespace, + queryVector, + similarityThreshold = 0.25 + ) { + const result = { + contextTexts: [], + sourceDocuments: [], + scores: [], + }; + const response = await client.search({ + collection_name: namespace, + vectors: queryVector, + }); + response.results.forEach((match) => { + if (match.score < similarityThreshold) return; + result.contextTexts.push(match.metadata.text); + result.sourceDocuments.push(match); + result.scores.push(match.score); + }); + return result; + }, + "namespace-stats": async function (reqBody = {}) { + const { namespace = null } = reqBody; + if (!namespace) throw new Error("namespace required"); + const { client } = await this.connect(); + if (!(await this.namespaceExists(client, namespace))) + throw new Error("Namespace by that name does not exist."); + const stats = await this.namespace(client, namespace); + return stats + ? stats + : { message: "No stats were able to be fetched from DB for namespace" }; + }, + "delete-namespace": async function (reqBody = {}) { + const { namespace = null } = reqBody; + const { client } = await this.connect(); + if (!(await this.namespaceExists(client, namespace))) + throw new Error("Namespace by that name does not exist."); + + const statistics = await this.namespace(client, namespace); + await this.deleteVectorsInNamespace(client, namespace); + const vectorCount = Number(statistics?.data?.row_count ?? 0); + return { + message: `Namespace ${namespace} was deleted along with ${vectorCount} vectors.`, + }; + }, + curateSources: function (sources = []) { + const documents = []; + for (const source of sources) { + const { metadata = {} } = source; + if (Object.keys(metadata).length > 0) { + documents.push({ + ...metadata, + ...(source.hasOwnProperty("pageContent") + ? { text: source.pageContent } + : {}), + }); + } + } + + return documents; + }, +}; + +module.exports.Zilliz = Zilliz;
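
To sanity-check the new provider end to end, a minimal usage sketch follows. It is illustrative only and not part of the patch: the endpoint and token below are placeholders for a real Zilliz Cloud cluster, the throwaway script is assumed to live in server/ (so utils/helpers resolves), it assumes getVectorDbClass and getLLMProvider are exported from utils/helpers, and the commented similarity-search call assumes a workspace collection named "my-workspace" already has embedded documents.

// sanity-check.js — illustrative sketch, not part of this patch
process.env.VECTOR_DB = "zilliz";
process.env.ZILLIZ_ENDPOINT = "https://sample.api.gcp-us-west1.zillizcloud.com"; // placeholder
process.env.ZILLIZ_API_TOKEN = "api-token-here"; // placeholder

const { getVectorDbClass, getLLMProvider } = require("./utils/helpers");

(async () => {
  // getVectorDbClass() switches on VECTOR_DB and returns the Zilliz provider added above.
  const VectorDb = getVectorDbClass();

  // heartbeat() calls connect(), which throws if the cluster is unreachable or the token is invalid.
  console.log(await VectorDb.heartbeat());

  // Hypothetical follow-up once documents have been embedded into a workspace:
  // const { contextTexts, sources } = await VectorDb.performSimilaritySearch({
  //   namespace: "my-workspace",
  //   input: "What do my documents say about pricing?",
  //   LLMConnector: getLLMProvider(),
  // });
})();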