Update Render.com image for AnythingLLM to latest

timothycarambat 2024-01-17 18:12:25 -08:00
commit addb3d0c3e
110 changed files with 4364 additions and 519 deletions

.github/ISSUE_TEMPLATE/01_bug.yml (vendored, new file, 44 lines)

@ -0,0 +1,44 @@
name: 🐛 Bug Report
description: File a bug report for AnythingLLM
title: "[BUG]: "
labels: [possible bug]
body:
- type: markdown
attributes:
value: |
Use this template to file a bug report for AnythingLLM. Please be as descriptive as possible to allow everyone to replicate and solve your issue.
Want help contributing a PR? Use our repo chatbot by OnboardAI! https://learnthisrepo.com/anythingllm
- type: dropdown
id: runtime
attributes:
label: How are you running AnythingLLM?
description: AnythingLLM can be run in many environments, pick the one that best represents where you encounter the bug.
options:
- Docker (local)
- Docker (remote machine)
- Local development
- AnythingLLM desktop app
- Not listed
default: 0
validations:
required: true
- type: textarea
id: what-happened
attributes:
label: What happened?
description: Also tell us, what did you expect to happen?
validations:
required: true
- type: textarea
id: reproduction
attributes:
label: Are there known steps to reproduce?
description: |
Let us know how to reproduce the bug and we may be able to fix it more
quickly. This is not required, but it is helpful.
validations:
required: false

.github/ISSUE_TEMPLATE/02_feature.yml (vendored, new file, 22 lines)

@ -0,0 +1,22 @@
name: ✨ New Feature suggestion
description: Suggest a new feature for AnythingLLM!
title: "[FEAT]: "
labels: [enhancement, feature request]
body:
- type: markdown
attributes:
value: |
Share a new idea for a feature or improvement. Be sure to search existing
issues first to avoid duplicates.
Want help contributing a PR? Use our repo chatbot by OnboardAI! https://learnthisrepo.com/anythingllm
- type: textarea
id: description
attributes:
label: What would you like to see?
description: |
Describe the feature and why it would be useful to your use-case as well as others.
validations:
required: true


@ -0,0 +1,13 @@
name: 📚 Documentation improvement
title: "[DOCS]: "
description: Report an issue or problem with the documentation.
labels: [documentation]
body:
- type: textarea
id: description
attributes:
label: Description
description: Describe the issue with the documentation that is giving you trouble or causing confusion.
validations:
required: true

.github/ISSUE_TEMPLATE/config.yml (vendored, new file, 5 lines)

@ -0,0 +1,5 @@
blank_issues_enabled: true
contact_links:
- name: 🧑‍🤝‍🧑 Community Discord
url: https://discord.gg/6UyHPeGZAC
about: Interact with the Mintplex Labs community here by asking for help, discussing ideas, and more!


@ -14,11 +14,12 @@ on:
push:
branches: ['master'] # master branch only. Do not modify.
paths-ignore:
- '*.md'
- '**.md'
- 'cloud-deployments/*'
- 'images/*'
- '.vscode/*'
- 'images/**/*'
- '.vscode/**/*'
- '**/.env.example'
- '.github/ISSUE_TEMPLATE/**/*'
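# Note: '**.md' matches Markdown files at any depth, whereas the previous '*.md' only matched files at the repository root.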
jobs:
push_multi_platform_to_registries:
@ -31,10 +32,18 @@ jobs:
- name: Check out the repo
uses: actions/checkout@v4
- name: Parse repository name to lowercase
- name: Check if DockerHub build needed
shell: bash
run: echo "repo=${GITHUB_REPOSITORY,,}" >> $GITHUB_OUTPUT
id: lowercase_repo
run: |
# Check if the secret for USERNAME is set (don't even check for the password)
if [[ -z "${{ secrets.DOCKER_USERNAME }}" ]]; then
echo "DockerHub build not needed"
echo "enabled=false" >> $GITHUB_OUTPUT
else
echo "DockerHub build needed"
echo "enabled=true" >> $GITHUB_OUTPUT
fi
id: dockerhub
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@ -44,6 +53,8 @@ jobs:
- name: Log in to Docker Hub
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
# Only login to the Docker Hub if the repo is mintplex/anythingllm, to allow for forks to build on GHCR
if: steps.dockerhub.outputs.enabled == 'true'
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
@ -60,8 +71,14 @@ jobs:
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
with:
images: |
mintplexlabs/anythingllm
${{ steps.dockerhub.outputs.enabled == 'true' && 'mintplexlabs/anythingllm' || '' }}
ghcr.io/${{ github.repository }}
tags: |
type=raw,value=latest,enable={{is_default_branch}}
type=ref,event=branch
type=ref,event=tag
type=ref,event=pr
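# With these tag types, the default branch also gets 'latest'; branch pushes, git tags, and PRs get ref-based tags.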
- name: Build and push multi-platform Docker image
uses: docker/build-push-action@v5
@ -70,8 +87,7 @@ jobs:
file: ./docker/Dockerfile
push: true
platforms: linux/amd64,linux/arm64
tags: |
${{ steps.meta.outputs.tags }}
${{ github.ref_name == 'master' && 'mintplexlabs/anythingllm:latest' || '' }}
${{ github.ref_name == 'master' && format('ghcr.io/{0}:{1}', steps.lowercase_repo.outputs.repo, 'latest') || '' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max


@ -8,11 +8,13 @@ on:
push:
branches: ['render']
paths-ignore:
- 'render.yaml'
- '*.md'
- '**.md'
- 'cloud-deployments/*'
- 'images/*'
- '.vscode/*'
- 'images/**/*'
- '.vscode/**/*'
- '**/.env.example'
- '.github/ISSUE_TEMPLATE/**/*'
- 'render.yaml'
jobs:
push_to_registries:


@ -2,10 +2,13 @@
"cSpell.words": [
"Dockerized",
"Langchain",
"Milvus",
"Ollama",
"openai",
"Qdrant",
"Weaviate"
"vectordbs",
"Weaviate",
"Zilliz"
],
"eslint.experimental.useFlatConfig": true
}


@ -1,4 +1,5 @@
<a name="readme-top"></a>
<p align="center">
<a href="https://useanything.com"><img src="https://github.com/Mintplex-Labs/anything-llm/blob/master/images/wordmark.png?raw=true" alt="AnythingLLM logo"></a>
</p>
@ -38,13 +39,14 @@ A full-stack application that enables you to turn any document, resource, or pie
</details>
### Product Overview
AnythingLLM is a full-stack application that lets you use commercial off-the-shelf LLMs, or popular open-source LLMs and vectorDB solutions, to build a private ChatGPT with no compromises. Run it locally or host it remotely, and chat intelligently with any document you provide it.
AnythingLLM divides your documents into objects called `workspaces`. A workspace functions a lot like a thread, but adds containerization for your documents. Workspaces can share documents, yet they do not talk to each other, so you can keep each workspace's context clean.
Some cool features of AnythingLLM:
- **Multi-user instance support and permissioning**
- Multiple document type support (PDF, TXT, DOCX, etc)
- Manage documents in your vector database from a simple UI
@ -57,7 +59,9 @@ Some cool features of AnythingLLM
- Full Developer API for custom integrations!
### Supported LLMs, Embedders, and Vector Databases
**Supported LLMs:**
- [Any open-source llama.cpp compatible model](/server/storage/models/README.md#text-generation-llm-selection)
- [OpenAI](https://openai.com)
- [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
@ -66,8 +70,11 @@ Some cool features of AnythingLLM
- [Ollama (chat models)](https://ollama.ai/)
- [LM Studio (all models)](https://lmstudio.ai)
- [LocalAi (all models)](https://localai.io/)
- [Together AI (chat models)](https://www.together.ai/)
- [Mistral](https://mistral.ai/)
**Supported Embedding models:**
- [AnythingLLM Native Embedder](/server/storage/models/README.md) (default)
- [OpenAI](https://openai.com)
- [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
@ -75,42 +82,45 @@ Some cool features of AnythingLLM
- [LocalAi (all)](https://localai.io/)
**Supported Vector Databases:**
- [LanceDB](https://github.com/lancedb/lancedb) (default)
- [Pinecone](https://pinecone.io)
- [Chroma](https://trychroma.com)
- [Weaviate](https://weaviate.io)
- [QDrant](https://qdrant.tech)
- [Milvus](https://milvus.io)
- [Zilliz](https://zilliz.com)
### Technical Overview
This monorepo consists of three main sections:
- `frontend`: A viteJS + React frontend that you can run to easily create and manage all your content the LLM can use.
- `server`: A NodeJS express server that handles all interactions, including vectorDB management and LLM requests.
- `docker`: Docker instructions and build process + information for building from source.
- `collector`: NodeJS express server that processes and parses documents from the UI.
## 🛳 Self Hosting
Mintplex Labs & the community maintain a number of deployment methods, scripts, and templates that you can use to run AnythingLLM locally. Refer to the table below to read how to deploy on your preferred environment or to automatically deploy.
| Docker | AWS | GCP | Digital Ocean | Render.com |
|----------------------------------------|----:|-----|---------------|------------|
| [![Deploy on Docker][docker-btn]][docker-deploy] | [![Deploy on AWS][aws-btn]][aws-deploy] | [![Deploy on GCP][gcp-btn]][gcp-deploy] | [![Deploy on DigitalOcean][do-btn]][do-deploy] | [![Deploy on Render.com][render-btn]][render-deploy] |
## How to setup for development
- `yarn setup` To fill in the required `.env` files you'll need in each of the application sections (from root of repo).
- Go fill those out before proceeding. Ensure `server/.env.development` is filled or else things won't work right.
- `yarn dev:server` To boot the server locally (from root of repo).
- `yarn dev:frontend` To boot the frontend locally (from root of repo).
- `yarn dev:collector` To then run the document collector (from root of repo).
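Taken together, a typical first run looks like this (a sketch; it assumes `yarn` is installed and every command runs from the repo root):

```shell
yarn setup           # scaffold the required .env files in each section
# Fill in server/.env.development (and the other .env files) before continuing.
yarn dev:server      # boot the server
yarn dev:frontend    # boot the frontend
yarn dev:collector   # run the document collector
```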
[Learn about documents](./server/storage/documents/DOCUMENTS.md)
[Learn about vector caching](./server/storage/vector-cache/VECTOR_CACHE.md)
## Contributing
- create issue
- create PR with branch name format of `<issue number>-<short name>`
- yee haw let's merge
@ -119,12 +129,15 @@ Mintplex Labs & the community maintain a number of deployment methods, scripts,
<summary><kbd>Telemetry for AnythingLLM</kbd></summary>
## Telemetry
AnythingLLM by Mintplex Labs Inc contains a telemetry feature that collects anonymous usage information.
### Why?
We use this information to help us understand how AnythingLLM is used, to help us prioritize work on new features and bug fixes, and to help us improve AnythingLLM's performance and stability.
### Opting out
Set `DISABLE_TELEMETRY` in your server or docker .env settings to "true" to opt out of telemetry.
```
@ -132,7 +145,9 @@ DISABLE_TELEMETRY="true"
```
### What do you explicitly track?
We will only track usage details that help us make product and roadmap decisions, specifically:
- Version of your installation
- When a document is added or removed. No information _about_ the document. Just that the event occurred. This gives us an idea of use.
- Type of vector database in use. Lets us know which vector database provider is most used, so we can prioritize changes when updates arrive for that provider.
@ -160,6 +175,7 @@ Copyright © 2023 [Mintplex Labs][profile-link]. <br />
This project is [MIT](./LICENSE) licensed.
<!-- LINK GROUP -->
[back-to-top]: https://img.shields.io/badge/-BACK_TO_TOP-222628?style=flat-square
[profile-link]: https://github.com/mintplex-labs
[vector-admin]: https://github.com/mintplex-labs/vector-admin


@ -64,6 +64,12 @@ server {
listen 80;
server_name [insert FQDN here];
location / {
# Prevent timeouts on long-running requests.
proxy_connect_timeout 605;
proxy_send_timeout 605;
proxy_read_timeout 605;
send_timeout 605;
keepalive_timeout 605;
proxy_pass http://0.0.0.0:3001;
}
}


@ -82,7 +82,8 @@
"\n",
"#!/bin/bash\n",
"# check output of userdata script with sudo tail -f /var/log/cloud-init-output.log\n",
"sudo yum install docker -y\n",
"sudo yum install docker iptables -y\n",
"sudo iptables -A OUTPUT -m owner ! --uid-owner root -d 169.254.169.254 -j DROP\n",
"sudo systemctl enable docker\n",
"sudo systemctl start docker\n",
"mkdir -p /home/ec2-user/anythingllm\n",


@ -29,14 +29,21 @@ app.post("/process", async function (request, response) {
const targetFilename = path
.normalize(filename)
.replace(/^(\.\.(\/|\\|$))+/, "");
const { success, reason } = await processSingleFile(targetFilename);
response.status(200).json({ filename: targetFilename, success, reason });
const {
success,
reason,
documents = [],
} = await processSingleFile(targetFilename);
response
.status(200)
.json({ filename: targetFilename, success, reason, documents });
} catch (e) {
console.error(e);
response.status(200).json({
filename: filename,
success: false,
reason: "A processing error occurred.",
documents: [],
});
}
return;
@ -45,14 +52,15 @@ app.post("/process", async function (request, response) {
app.post("/process-link", async function (request, response) {
const { link } = reqBody(request);
try {
const { success, reason } = await processLink(link);
response.status(200).json({ url: link, success, reason });
const { success, reason, documents = [] } = await processLink(link);
response.status(200).json({ url: link, success, reason, documents });
} catch (e) {
console.error(e);
response.status(200).json({
url: link,
success: false,
reason: "A processing error occurred.",
documents: [],
});
}
return;
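For illustration, a successful call now returns the parsed documents alongside the status fields (the URL and payload below are hypothetical):

```js
// POST /process-link with body { "link": "https://example.com" }
// => {
//      "url": "https://example.com",
//      "success": true,
//      "reason": null,
//      "documents": [ { /* document metadata, including its storage location */ } ]
//    }
```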


@ -12,7 +12,11 @@ async function scrapeGenericUrl(link) {
if (!content.length) {
console.error(`Resulting URL content was empty at ${link}.`);
return { success: false, reason: `No URL content found at ${link}.` };
return {
success: false,
reason: `No URL content found at ${link}.`,
documents: [],
};
}
const url = new URL(link);
@ -32,9 +36,12 @@ async function scrapeGenericUrl(link) {
token_count_estimate: tokenizeString(content).length,
};
writeToServerDocuments(data, `url-${slugify(filename)}-${data.id}`);
const document = writeToServerDocuments(
data,
`url-${slugify(filename)}-${data.id}`
);
console.log(`[SUCCESS]: URL ${link} converted & ready for embedding.\n`);
return { success: true, reason: null };
return { success: true, reason: null, documents: [document] };
}
async function getPageContent(link) {


@ -31,6 +31,7 @@ async function asAudio({ fullFilePath = "", filename = "" }) {
return {
success: false,
reason: `Failed to parse content from ${filename}.`,
documents: [],
};
}
@ -43,7 +44,11 @@ async function asAudio({ fullFilePath = "", filename = "" }) {
if (!content.length) {
console.error(`Resulting text content was empty for ${filename}.`);
trashFile(fullFilePath);
return { success: false, reason: `No text content found in ${filename}.` };
return {
success: false,
reason: `No text content found in ${filename}.`,
documents: [],
};
}
const data = {
@ -60,12 +65,15 @@ async function asAudio({ fullFilePath = "", filename = "" }) {
token_count_estimate: tokenizeString(content).length,
};
writeToServerDocuments(data, `${slugify(filename)}-${data.id}`);
const document = writeToServerDocuments(
data,
`${slugify(filename)}-${data.id}`
);
trashFile(fullFilePath);
console.log(
`[SUCCESS]: ${filename} transcribed, converted & ready for embedding.\n`
);
return { success: true, reason: null };
return { success: true, reason: null, documents: [document] };
}
async function convertToWavAudioData(sourcePath) {


@ -24,7 +24,11 @@ async function asDocX({ fullFilePath = "", filename = "" }) {
if (!pageContent.length) {
console.error(`Resulting text content was empty for ${filename}.`);
trashFile(fullFilePath);
return { success: false, reason: `No text content found in ${filename}.` };
return {
success: false,
reason: `No text content found in ${filename}.`,
documents: [],
};
}
const content = pageContent.join("");
@ -42,10 +46,13 @@ async function asDocX({ fullFilePath = "", filename = "" }) {
token_count_estimate: tokenizeString(content).length,
};
writeToServerDocuments(data, `${slugify(filename)}-${data.id}`);
const document = writeToServerDocuments(
data,
`${slugify(filename)}-${data.id}`
);
trashFile(fullFilePath);
console.log(`[SUCCESS]: ${filename} converted & ready for embedding.\n`);
return { success: true, reason: null };
return { success: true, reason: null, documents: [document] };
}
module.exports = asDocX;


@ -22,10 +22,15 @@ async function asMbox({ fullFilePath = "", filename = "" }) {
if (!mails.length) {
console.error(`Resulting mail items were empty for ${filename}.`);
trashFile(fullFilePath);
return { success: false, reason: `No mail items found in ${filename}.` };
return {
success: false,
reason: `No mail items found in ${filename}.`,
documents: [],
};
}
let item = 1;
const documents = [];
for (const mail of mails) {
if (!mail.hasOwnProperty("text")) continue;
@ -52,14 +57,18 @@ async function asMbox({ fullFilePath = "", filename = "" }) {
};
item++;
writeToServerDocuments(data, `${slugify(filename)}-${data.id}-msg-${item}`);
const document = writeToServerDocuments(
data,
`${slugify(filename)}-${data.id}-msg-${item}`
);
documents.push(document);
}
trashFile(fullFilePath);
console.log(
`[SUCCESS]: ${filename} messages converted & ready for embedding.\n`
);
return { success: true, reason: null };
return { success: true, reason: null, documents };
}
module.exports = asMbox;


@ -20,7 +20,11 @@ async function asOfficeMime({ fullFilePath = "", filename = "" }) {
if (!content.length) {
console.error(`Resulting text content was empty for ${filename}.`);
trashFile(fullFilePath);
return { success: false, reason: `No text content found in ${filename}.` };
return {
success: false,
reason: `No text content found in ${filename}.`,
documents: [],
};
}
const data = {
@ -37,10 +41,13 @@ async function asOfficeMime({ fullFilePath = "", filename = "" }) {
token_count_estimate: tokenizeString(content).length,
};
writeToServerDocuments(data, `${slugify(filename)}-${data.id}`);
const document = writeToServerDocuments(
data,
`${slugify(filename)}-${data.id}`
);
trashFile(fullFilePath);
console.log(`[SUCCESS]: ${filename} converted & ready for embedding.\n`);
return { success: true, reason: null };
return { success: true, reason: null, documents: [document] };
}
module.exports = asOfficeMime;


@ -29,7 +29,11 @@ async function asPDF({ fullFilePath = "", filename = "" }) {
if (!pageContent.length) {
console.error(`Resulting text content was empty for ${filename}.`);
trashFile(fullFilePath);
return { success: false, reason: `No text content found in ${filename}.` };
return {
success: false,
reason: `No text content found in ${filename}.`,
documents: [],
};
}
const content = pageContent.join("");
@ -47,10 +51,13 @@ async function asPDF({ fullFilePath = "", filename = "" }) {
token_count_estimate: tokenizeString(content).length,
};
writeToServerDocuments(data, `${slugify(filename)}-${data.id}`);
const document = writeToServerDocuments(
data,
`${slugify(filename)}-${data.id}`
);
trashFile(fullFilePath);
console.log(`[SUCCESS]: ${filename} converted & ready for embedding.\n`);
return { success: true, reason: null };
return { success: true, reason: null, documents: [document] };
}
module.exports = asPDF;


@ -19,7 +19,11 @@ async function asTxt({ fullFilePath = "", filename = "" }) {
if (!content?.length) {
console.error(`Resulting text content was empty for ${filename}.`);
trashFile(fullFilePath);
return { success: false, reason: `No text content found in ${filename}.` };
return {
success: false,
reason: `No text content found in ${filename}.`,
documents: [],
};
}
console.log(`-- Working ${filename} --`);
@ -37,10 +41,13 @@ async function asTxt({ fullFilePath = "", filename = "" }) {
token_count_estimate: tokenizeString(content).length,
};
writeToServerDocuments(data, `${slugify(filename)}-${data.id}`);
const document = writeToServerDocuments(
data,
`${slugify(filename)}-${data.id}`
);
trashFile(fullFilePath);
console.log(`[SUCCESS]: ${filename} converted & ready for embedding.\n`);
return { success: true, reason: null };
return { success: true, reason: null, documents: [document] };
}
module.exports = asTxt;


@ -13,11 +13,13 @@ async function processSingleFile(targetFilename) {
return {
success: false,
reason: "Filename is a reserved filename and cannot be processed.",
documents: [],
};
if (!fs.existsSync(fullFilePath))
return {
success: false,
reason: "File does not exist in upload directory.",
documents: [],
};
const fileExtension = path.extname(fullFilePath).toLowerCase();
@ -25,6 +27,7 @@ async function processSingleFile(targetFilename) {
return {
success: false,
reason: `No file extension found. This file cannot be processed.`,
documents: [],
};
}
@ -33,6 +36,7 @@ async function processSingleFile(targetFilename) {
return {
success: false,
reason: `File extension ${fileExtension} not supported for parsing.`,
documents: [],
};
}


@ -40,14 +40,19 @@ function writeToServerDocuments(
if (!fs.existsSync(destination))
fs.mkdirSync(destination, { recursive: true });
const destinationFilePath = path.resolve(destination, filename);
const destinationFilePath = path.resolve(destination, filename) + ".json";
fs.writeFileSync(
destinationFilePath + ".json",
JSON.stringify(data, null, 4),
{ encoding: "utf-8" }
);
return;
fs.writeFileSync(destinationFilePath, JSON.stringify(data, null, 4), {
encoding: "utf-8",
});
return {
...data,
// relative location string that can be passed into the /update-embeddings api
// that will work since we know the location exists and since we only allow
// 1-level deep folders this will always work. This still works for integrations like GitHub and YouTube.
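// Illustrative (hypothetical path): ".../storage/documents/custom-documents/url-my-page-1234.json" yields the location "custom-documents/url-my-page-1234.json".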
location: destinationFilePath.split("/").slice(-2).join("/"),
};
}
// When required we can wipe the entire collector hotdir and tmp storage in case


@ -1,6 +1,9 @@
const VALID_PROTOCOLS = ["https:", "http:"];
function validURL(url) {
try {
new URL(url);
const destination = new URL(url);
if (!VALID_PROTOCOLS.includes(destination.protocol)) return false;
return true;
} catch {}
return false;
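
A quick sketch of the new behavior (all inputs hypothetical):

```js
validURL("https://example.com"); // true
validURL("http://example.com");  // true
validURL("javascript:alert(1)"); // false: protocol not in VALID_PROTOCOLS
validURL("not a url");           // false: the URL constructor throws
```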


@ -40,6 +40,14 @@ GID='1000'
# OLLAMA_MODEL_PREF='llama2'
# OLLAMA_MODEL_TOKEN_LIMIT=4096
# LLM_PROVIDER='togetherai'
# TOGETHER_AI_API_KEY='my-together-ai-key'
# TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'
# LLM_PROVIDER='mistral'
# MISTRAL_API_KEY='example-mistral-ai-api-key'
# MISTRAL_MODEL_PREF='mistral-tiny'
###########################################
######## Embedding API SELECTION ##########
###########################################
@ -85,6 +93,17 @@ GID='1000'
# QDRANT_ENDPOINT="http://localhost:6333"
# QDRANT_API_KEY=
# Enable all below if you are using vector database: Milvus.
# VECTOR_DB="milvus"
# MILVUS_ADDRESS="http://localhost:19530"
# MILVUS_USERNAME=
# MILVUS_PASSWORD=
# Enable all below if you are using vector database: Zilliz Cloud.
# VECTOR_DB="zilliz"
# ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com"
# ZILLIZ_API_TOKEN=api-token-here
# CLOUD DEPLOYMENT VARIABLES ONLY
# AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting.


@ -16,6 +16,7 @@
"@microsoft/fetch-event-source": "^2.0.1",
"@phosphor-icons/react": "^2.0.13",
"buffer": "^6.0.3",
"dompurify": "^3.0.8",
"he": "^1.2.0",
"highlight.js": "^11.9.0",
"lodash.debounce": "^4.0.8",
@ -30,6 +31,7 @@
"react-router-dom": "^6.3.0",
"react-tag-input-component": "^2.0.2",
"react-toastify": "^9.1.3",
"react-tooltip": "^5.25.2",
"text-case": "^1.0.9",
"truncate": "^3.0.0",
"uuid": "^9.0.0"


@ -8,6 +8,8 @@ import PrivateRoute, {
import { ToastContainer } from "react-toastify";
import "react-toastify/dist/ReactToastify.css";
import Login from "@/pages/Login";
import OnboardingFlow from "@/pages/OnboardingFlow";
import { PfpProvider } from "./PfpContext";
import { LogoProvider } from "./LogoContext";
@ -19,30 +21,29 @@ const AdminInvites = lazy(() => import("@/pages/Admin/Invitations"));
const AdminWorkspaces = lazy(() => import("@/pages/Admin/Workspaces"));
const AdminSystem = lazy(() => import("@/pages/Admin/System"));
const GeneralChats = lazy(() => import("@/pages/GeneralSettings/Chats"));
const GeneralAppearance = lazy(() =>
import("@/pages/GeneralSettings/Appearance")
const GeneralAppearance = lazy(
() => import("@/pages/GeneralSettings/Appearance")
);
const GeneralApiKeys = lazy(() => import("@/pages/GeneralSettings/ApiKeys"));
const GeneralLLMPreference = lazy(() =>
import("@/pages/GeneralSettings/LLMPreference")
const GeneralLLMPreference = lazy(
() => import("@/pages/GeneralSettings/LLMPreference")
);
const GeneralEmbeddingPreference = lazy(() =>
import("@/pages/GeneralSettings/EmbeddingPreference")
const GeneralEmbeddingPreference = lazy(
() => import("@/pages/GeneralSettings/EmbeddingPreference")
);
const GeneralVectorDatabase = lazy(() =>
import("@/pages/GeneralSettings/VectorDatabase")
const GeneralVectorDatabase = lazy(
() => import("@/pages/GeneralSettings/VectorDatabase")
);
const GeneralExportImport = lazy(() =>
import("@/pages/GeneralSettings/ExportImport")
const GeneralExportImport = lazy(
() => import("@/pages/GeneralSettings/ExportImport")
);
const GeneralSecurity = lazy(() => import("@/pages/GeneralSettings/Security"));
const DataConnectors = lazy(() =>
import("@/pages/GeneralSettings/DataConnectors")
const DataConnectors = lazy(
() => import("@/pages/GeneralSettings/DataConnectors")
);
const DataConnectorSetup = lazy(() =>
import("@/pages/GeneralSettings/DataConnectors/Connectors")
const DataConnectorSetup = lazy(
() => import("@/pages/GeneralSettings/DataConnectors/Connectors")
);
const OnboardingFlow = lazy(() => import("@/pages/OnboardingFlow"));
export default function App() {
return (


@ -0,0 +1,103 @@
import { useState, useEffect } from "react";
import System from "@/models/system";
export default function MistralOptions({ settings }) {
const [inputValue, setInputValue] = useState(settings?.MistralApiKey);
const [mistralKey, setMistralKey] = useState(settings?.MistralApiKey);
return (
<div className="flex gap-x-4">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Mistral API Key
</label>
<input
type="password"
name="MistralApiKey"
className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Mistral API Key"
defaultValue={settings?.MistralApiKey ? "*".repeat(20) : ""}
required={true}
autoComplete="off"
spellCheck={false}
onChange={(e) => setInputValue(e.target.value)}
onBlur={() => setMistralKey(inputValue)}
/>
</div>
<MistralModelSelection settings={settings} apiKey={mistralKey} />
</div>
);
}
function MistralModelSelection({ apiKey, settings }) {
const [customModels, setCustomModels] = useState([]);
const [loading, setLoading] = useState(true);
useEffect(() => {
async function findCustomModels() {
if (!apiKey) {
setCustomModels([]);
setLoading(false);
return;
}
setLoading(true);
const { models } = await System.customModels(
"mistral",
typeof apiKey === "boolean" ? null : apiKey
);
setCustomModels(models || []);
setLoading(false);
}
findCustomModels();
}, [apiKey]);
if (loading || customModels.length == 0) {
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="MistralModelPref"
disabled={true}
className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
<option disabled={true} selected={true}>
{!!apiKey
? "-- loading available models --"
: "-- waiting for API key --"}
</option>
</select>
</div>
);
}
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="MistralModelPref"
required={true}
className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{customModels.length > 0 && (
<optgroup label="Available Mistral Models">
{customModels.map((model) => {
return (
<option
key={model.id}
value={model.id}
selected={settings?.MistralModelPref === model.id}
>
{model.id}
</option>
);
})}
</optgroup>
)}
</select>
</div>
);
}


@ -54,6 +54,7 @@ function NativeModelSelection({ settings }) {
}
return (
<>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Model Selection
@ -80,5 +81,22 @@ function NativeModelSelection({ settings }) {
)}
</select>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Token context window
</label>
<input
type="number"
name="NativeLLMTokenLimit"
className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="4096"
min={1}
onScroll={(e) => e.target.blur()}
defaultValue={settings?.NativeLLMTokenLimit}
required={true}
autoComplete="off"
/>
</div>
</>
);
}


@ -0,0 +1,95 @@
import System from "@/models/system";
import { useState, useEffect } from "react";
export default function TogetherAiOptions({ settings }) {
return (
<div className="flex gap-x-4">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Together AI API Key
</label>
<input
type="password"
name="TogetherAiApiKey"
className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Together AI API Key"
defaultValue={settings?.TogetherAiApiKey ? "*".repeat(20) : ""}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
<TogetherAiModelSelection settings={settings} />
</div>
);
}
function TogetherAiModelSelection({ settings }) {
const [groupedModels, setGroupedModels] = useState({});
const [loading, setLoading] = useState(true);
useEffect(() => {
async function findCustomModels() {
setLoading(true);
const { models } = await System.customModels("togetherai");
if (models?.length > 0) {
const modelsByOrganization = models.reduce((acc, model) => {
acc[model.organization] = acc[model.organization] || [];
acc[model.organization].push(model);
return acc;
}, {});
setGroupedModels(modelsByOrganization);
}
setLoading(false);
}
findCustomModels();
}, []);
if (loading || Object.keys(groupedModels).length === 0) {
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="TogetherAiModelPref"
disabled={true}
className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
<option disabled={true} selected={true}>
-- loading available models --
</option>
</select>
</div>
);
}
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="TogetherAiModelPref"
required={true}
className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{Object.entries(groupedModels).map(([organization, models]) => (
<optgroup key={organization} label={organization}>
{models.map((model) => (
<option
key={model.id}
value={model.id}
selected={settings.TogetherAiModelPref === model.id}
>
{model.name}
</option>
))}
</optgroup>
))}
</select>
</div>
);
}


@ -6,7 +6,9 @@ import Directory from "./Directory";
import showToast from "../../../../utils/toast";
import WorkspaceDirectory from "./WorkspaceDirectory";
const COST_PER_TOKEN = 0.0004;
// OpenAI cost per token for text-embedding-ada-002
// ref: https://openai.com/pricing#:~:text=%C2%A0/%201K%20tokens-,Embedding%20models,-Build%20advanced%20search
const COST_PER_TOKEN = 0.0000001; // $0.0001 / 1K tokens
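// Back-of-envelope check (illustrative): embedding 1,000,000 tokens costs 1,000,000 * 0.0000001 = $0.10.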
export default function DocumentSettings({
workspace,


@ -0,0 +1,120 @@
import useGetProviderModels, {
DISABLED_PROVIDERS,
} from "./useGetProviderModels";
export default function ChatModelSelection({
settings,
workspace,
setHasChanges,
}) {
const { defaultModels, customModels, loading } = useGetProviderModels(
settings?.LLMProvider
);
if (DISABLED_PROVIDERS.includes(settings?.LLMProvider)) return null;
if (loading) {
return (
<div>
<div className="flex flex-col">
<label
htmlFor="name"
className="block text-sm font-medium text-white"
>
Chat model
</label>
<p className="text-white text-opacity-60 text-xs font-medium py-1.5">
The specific chat model that will be used for this workspace. If
empty, will use the system LLM preference.
</p>
</div>
<select
name="chatModel"
required={true}
disabled={true}
className="bg-zinc-900 text-white text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5"
>
<option disabled={true} selected={true}>
-- waiting for models --
</option>
</select>
</div>
);
}
return (
<div>
<div className="flex flex-col">
<label htmlFor="name" className="block text-sm font-medium text-white">
Chat model{" "}
<span className="font-normal">({settings?.LLMProvider})</span>
</label>
<p className="text-white text-opacity-60 text-xs font-medium py-1.5">
The specific chat model that will be used for this workspace. If
empty, will use the system LLM preference.
</p>
</div>
<select
name="chatModel"
required={true}
onChange={() => {
setHasChanges(true);
}}
className="bg-zinc-900 text-white text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5"
>
<option disabled={true} selected={workspace?.chatModel === null}>
System default
</option>
{defaultModels.length > 0 && (
<optgroup label="General models">
{defaultModels.map((model) => {
return (
<option
key={model}
value={model}
selected={workspace?.chatModel === model}
>
{model}
</option>
);
})}
</optgroup>
)}
{Array.isArray(customModels) && customModels.length > 0 && (
<optgroup label="Custom models">
{customModels.map((model) => {
return (
<option
key={model.id}
value={model.id}
selected={workspace?.chatModel === model.id}
>
{model.id}
</option>
);
})}
</optgroup>
)}
{/* For providers like TogetherAi where we partition model by creator entity. */}
{!Array.isArray(customModels) &&
Object.keys(customModels).length > 0 && (
<>
{Object.entries(customModels).map(([organization, models]) => (
<optgroup key={organization} label={organization}>
{models.map((model) => (
<option
key={model.id}
value={model.id}
selected={workspace?.chatModel === model.id}
>
{model.name}
</option>
))}
</optgroup>
))}
</>
)}
</select>
</div>
);
}


@ -0,0 +1,49 @@
import System from "@/models/system";
import { useEffect, useState } from "react";
// Providers which cannot use this feature for workspace<>model selection
export const DISABLED_PROVIDERS = ["azure", "lmstudio"];
const PROVIDER_DEFAULT_MODELS = {
openai: ["gpt-3.5-turbo", "gpt-4", "gpt-4-1106-preview", "gpt-4-32k"],
gemini: ["gemini-pro"],
anthropic: ["claude-2", "claude-instant-1"],
azure: [],
lmstudio: [],
localai: [],
ollama: [],
togetherai: [],
native: [],
};
// For togetherAi, which has a large model list - we subgroup the options
// by their creator organization (eg: Meta, Mistral, etc)
// which makes selection easier to read.
function groupModels(models) {
return models.reduce((acc, model) => {
acc[model.organization] = acc[model.organization] || [];
acc[model.organization].push(model);
return acc;
}, {});
}
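// Hypothetical input/output: models such as { id: "mistralai/Mixtral-8x7B-Instruct-v0.1", organization: "mistralai" }
// come back grouped as { "mistralai": [...], "meta-llama": [...] }, ready for <optgroup> rendering.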
export default function useGetProviderModels(provider = null) {
const [defaultModels, setDefaultModels] = useState([]);
const [customModels, setCustomModels] = useState([]);
const [loading, setLoading] = useState(true);
useEffect(() => {
async function fetchProviderModels() {
if (!provider) return;
const { models = [] } = await System.customModels(provider);
if (PROVIDER_DEFAULT_MODELS.hasOwnProperty(provider))
setDefaultModels(PROVIDER_DEFAULT_MODELS[provider]);
provider === "togetherai"
? setCustomModels(groupModels(models))
: setCustomModels(models);
setLoading(false);
}
fetchProviderModels();
}, [provider]);
return { defaultModels, customModels, loading };
}


@ -6,6 +6,7 @@ import System from "../../../../models/system";
import PreLoader from "../../../Preloader";
import { useParams } from "react-router-dom";
import showToast from "../../../../utils/toast";
import ChatModelPreference from "./ChatModelPreference";
// Ensure that a type is correct before sending the body
// to the backend.
@ -26,24 +27,21 @@ function castToType(key, value) {
return definitions[key].cast(value);
}
export default function WorkspaceSettings({ workspace }) {
function recommendedSettings(provider = null) {
switch (provider) {
case "mistral":
return { temp: 0 };
default:
return { temp: 0.7 };
}
}
export default function WorkspaceSettings({ active, workspace, settings }) {
const { slug } = useParams();
const formEl = useRef(null);
const [saving, setSaving] = useState(false);
const [hasChanges, setHasChanges] = useState(false);
const [totalVectors, setTotalVectors] = useState(null);
const [canDelete, setCanDelete] = useState(false);
useEffect(() => {
async function fetchKeys() {
const canDelete = await System.getCanDeleteWorkspaces();
setCanDelete(canDelete);
const totalVectors = await System.totalIndexes();
setTotalVectors(totalVectors);
}
fetchKeys();
}, []);
const defaults = recommendedSettings(settings?.LLMProvider);
const handleUpdate = async (e) => {
setSaving(true);
@ -89,6 +87,9 @@ export default function WorkspaceSettings({ workspace }) {
<h3 className="text-white text-sm font-semibold">
Vector database identifier
</h3>
<p className="text-white text-opacity-60 text-xs font-medium py-1.5">
{" "}
</p>
<p className="text-white text-opacity-60 text-sm font-medium">
{workspace?.slug}
</p>
@ -101,13 +102,7 @@ export default function WorkspaceSettings({ workspace }) {
<p className="text-white text-opacity-60 text-xs font-medium my-[2px]">
Total number of vectors in your vector database.
</p>
{totalVectors !== null ? (
<p className="text-white text-opacity-60 text-sm font-medium">
{totalVectors}
</p>
) : (
<PreLoader size="4" />
)}
<VectorCount reload={active} workspace={workspace} />
</div>
</div>
</div>
@ -115,6 +110,11 @@ export default function WorkspaceSettings({ workspace }) {
<div className="flex">
<div className="flex flex-col gap-y-4 w-1/2">
<div className="w-3/4 flex flex-col gap-y-4">
<ChatModelPreference
settings={settings}
workspace={workspace}
setHasChanges={setHasChanges}
/>
<div>
<div className="flex flex-col">
<label
@ -153,20 +153,20 @@ export default function WorkspaceSettings({ workspace }) {
This setting controls how "random" or dynamic your chat
responses will be.
<br />
The higher the number (2.0 maximum) the more random and
The higher the number (1.0 maximum) the more random and
incoherent.
<br />
<i>Recommended: 0.7</i>
<i>Recommended: {defaults.temp}</i>
</p>
</div>
<input
name="openAiTemp"
type="number"
min={0.0}
max={2.0}
max={1.0}
step={0.1}
onWheel={(e) => e.target.blur()}
defaultValue={workspace?.openAiTemp ?? 0.7}
defaultValue={workspace?.openAiTemp ?? defaults.temp}
className="bg-zinc-900 text-white text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5"
placeholder="0.7"
required={true}
@ -275,15 +275,7 @@ export default function WorkspaceSettings({ workspace }) {
</div>
</div>
<div className="flex items-center justify-between p-2 md:p-6 space-x-2 border-t rounded-b border-gray-600">
{canDelete && (
<button
onClick={deleteWorkspace}
type="button"
className="transition-all duration-300 border border-transparent rounded-lg whitespace-nowrap text-sm px-5 py-2.5 focus:z-10 bg-transparent text-white hover:text-white hover:bg-red-600"
>
Delete Workspace
</button>
)}
<DeleteWorkspace workspace={workspace} onClick={deleteWorkspace} />
{hasChanges && (
<button
type="submit"
@ -296,3 +288,43 @@ export default function WorkspaceSettings({ workspace }) {
</form>
);
}
function DeleteWorkspace({ workspace, onClick }) {
const [canDelete, setCanDelete] = useState(false);
useEffect(() => {
async function fetchKeys() {
const canDelete = await System.getCanDeleteWorkspaces();
setCanDelete(canDelete);
}
fetchKeys();
}, [workspace?.slug]);
if (!canDelete) return null;
return (
<button
onClick={onClick}
type="button"
className="transition-all duration-300 border border-transparent rounded-lg whitespace-nowrap text-sm px-5 py-2.5 focus:z-10 bg-transparent text-white hover:text-white hover:bg-red-600"
>
Delete Workspace
</button>
);
}
function VectorCount({ reload, workspace }) {
const [totalVectors, setTotalVectors] = useState(null);
useEffect(() => {
async function fetchVectorCount() {
const totalVectors = await System.totalIndexes(workspace.slug);
setTotalVectors(totalVectors);
}
fetchVectorCount();
}, [workspace?.slug, reload]);
if (totalVectors === null) return <PreLoader size="4" />;
return (
<p className="text-white text-opacity-60 text-sm font-medium">
{totalVectors}
</p>
);
}


@ -114,7 +114,11 @@ const ManageWorkspace = ({ hideModal = noop, providedSlug = null }) => {
/>
</div>
<div className={selectedTab === "settings" ? "" : "hidden"}>
<WorkspaceSettings workspace={workspace} fileTypes={fileTypes} />
<WorkspaceSettings
active={selectedTab === "settings"} // To force reload live sub-components like VectorCount
workspace={workspace}
settings={settings}
/>
</div>
</Suspense>
</div>


@ -37,7 +37,7 @@ export default function PasswordModal({ mode = "single" }) {
export function usePasswordModal() {
const [auth, setAuth] = useState({
loading: true,
required: false,
requiresAuth: false,
mode: "single",
});


@ -0,0 +1,52 @@
export default function MilvusDBOptions({ settings }) {
return (
<div className="w-full flex flex-col gap-y-4">
<div className="w-full flex items-center gap-4">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Milvus DB Address
</label>
<input
type="text"
name="MilvusAddress"
className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="http://localhost:19530"
defaultValue={settings?.MilvusAddress}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Milvus Username
</label>
<input
type="text"
name="MilvusUsername"
className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="username"
defaultValue={settings?.MilvusUsername}
autoComplete="off"
spellCheck={false}
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Milvus Password
</label>
<input
type="password"
name="MilvusPassword"
className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="password"
defaultValue={settings?.MilvusPassword ? "*".repeat(20) : ""}
autoComplete="off"
spellCheck={false}
/>
</div>
</div>
</div>
);
}


@ -0,0 +1,38 @@
export default function ZillizCloudOptions({ settings }) {
return (
<div className="w-full flex flex-col gap-y-4">
<div className="w-full flex items-center gap-4">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Cluster Endpoint
</label>
<input
type="text"
name="ZillizEndpoint"
className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="https://sample.api.gcp-us-west1.zillizcloud.com"
defaultValue={settings?.ZillizEndpoint}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
API Token
</label>
<input
type="password"
name="ZillizApiToken"
className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Zilliz cluster API Token"
defaultValue={settings?.ZillizApiToken ? "*".repeat(20) : ""}
autoComplete="off"
spellCheck={false}
/>
</div>
</div>
</div>
);
}


@ -119,21 +119,19 @@ function CitationDetailModal({ source, onClose }) {
className="bg-transparent outline-none fixed top-0 left-0 w-full h-full flex items-center justify-center z-10"
>
<div className="relative w-full max-w-2xl bg-main-gradient rounded-lg shadow border border-white/10 overflow-hidden">
<div className="flex items-start justify-between p-6 border-b rounded-t border-gray-500/50">
<div className="flex flex-col flex-grow mr-4">
<div className="relative p-6 border-b rounded-t border-gray-500/50">
<h3 className="text-xl font-semibold text-white overflow-hidden overflow-ellipsis whitespace-nowrap">
{truncate(title, 52)}
{truncate(title, 45)}
</h3>
{references > 1 && (
<p className="text-xs text-gray-400 mt-2">
Referenced {references} times.
</p>
)}
</div>
<button
onClick={handleModalClose}
type="button"
className="transition-all duration-300 text-gray-400 bg-transparent hover:border-white/60 rounded-lg text-sm p-1.5 ml-auto inline-flex items-center bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
className="absolute top-6 right-6 transition-all duration-300 text-gray-400 bg-transparent hover:border-white/60 rounded-lg text-sm p-1.5 inline-flex items-center bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
>
<X className="text-gray-300 text-lg" />
</button>


@ -0,0 +1,43 @@
import useCopyText from "@/hooks/useCopyText";
import { Check, ClipboardText } from "@phosphor-icons/react";
import { memo } from "react";
import { Tooltip } from "react-tooltip";
const Actions = ({ message }) => {
return (
<div className="flex justify-start items-center gap-x-4">
<CopyMessage message={message} />
{/* Other actions to go here later. */}
</div>
);
};
function CopyMessage({ message }) {
const { copied, copyText } = useCopyText();
return (
<>
<div className="mt-3 relative">
<button
data-tooltip-id="copy-assistant-text"
data-tooltip-content="Copy"
className="text-zinc-300"
onClick={() => copyText(message)}
>
{copied ? (
<Check size={18} className="mb-1" />
) : (
<ClipboardText size={18} className="mb-1" />
)}
</button>
</div>
<Tooltip
id="copy-assistant-text"
place="bottom"
delayShow={300}
className="tooltip !text-xs"
/>
</>
);
}
export default memo(Actions);


@ -1,12 +1,15 @@
import { memo, forwardRef } from "react";
import React, { memo, forwardRef } from "react";
import { Warning } from "@phosphor-icons/react";
import Jazzicon from "../../../../UserIcon";
import Actions from "./Actions";
import renderMarkdown from "@/utils/chat/markdown";
import { userFromStorage } from "@/utils/request";
import Citations from "../Citation";
import { AI_BACKGROUND_COLOR, USER_BACKGROUND_COLOR } from "@/utils/constants";
import { v4 } from "uuid";
import createDOMPurify from "dompurify";
const DOMPurify = createDOMPurify(window);
const HistoricalMessage = forwardRef(
(
{ uuid = v4(), message, role, workspace, sources = [], error = false },
@ -45,10 +48,18 @@ const HistoricalMessage = forwardRef(
) : (
<span
className={`whitespace-pre-line text-white font-normal text-sm md:text-sm flex flex-col gap-y-1 mt-2`}
dangerouslySetInnerHTML={{ __html: renderMarkdown(message) }}
dangerouslySetInnerHTML={{
__html: DOMPurify.sanitize(renderMarkdown(message)),
}}
/>
)}
</div>
{role === "assistant" && (
<div className="flex gap-x-5">
<div className="relative w-[35px] h-[35px] rounded-full flex-shrink-0 overflow-hidden" />
<Actions message={DOMPurify.sanitize(message)} />
</div>
)}
{role === "assistant" && <Citations sources={sources} />}
</div>
</div>


@ -17,23 +17,24 @@ export default function ChatHistory({ history = [], workspace }) {
}, [history]);
const handleScroll = () => {
const isBottom =
chatHistoryRef.current.scrollHeight - chatHistoryRef.current.scrollTop ===
const diff =
chatHistoryRef.current.scrollHeight -
chatHistoryRef.current.scrollTop -
chatHistoryRef.current.clientHeight;
// Fuzzy margin for what qualifies as "bottom". More robust than a strict equality check, which can break as measurements shift over time.
const isBottom = diff <= 10;
setIsAtBottom(isBottom);
};
const debouncedScroll = debounce(handleScroll, 100);
useEffect(() => {
function watchScrollEvent() {
if (!chatHistoryRef.current) return null;
const chatHistoryElement = chatHistoryRef.current;
if (!chatHistoryElement) return null;
chatHistoryElement.addEventListener("scroll", debouncedScroll);
return () => {
chatHistoryElement.removeEventListener("scroll", debouncedScroll);
debouncedScroll.cancel();
};
}
watchScrollEvent();
}, []);
const scrollToBottom = () => {
@ -49,11 +50,11 @@ export default function ChatHistory({ history = [], workspace }) {
return (
<div className="flex flex-col h-full md:mt-0 pb-48 w-full justify-end items-center">
<div className="flex flex-col items-start">
<p className="text-white/60 text-lg font-base -ml-6 py-4">
<p className="text-white/60 text-lg font-base py-4">
Welcome to your new workspace.
</p>
<div className="w-full text-center">
<p className="text-white/60 text-lg font-base inline-flex items-center gap-x-2">
<p className="text-white/60 text-lg font-base inline-grid md:inline-flex items-center gap-x-2">
To get started either{" "}
<span
className="underline font-medium cursor-pointer"
@ -114,7 +115,6 @@ export default function ChatHistory({ history = [], workspace }) {
/>
);
})}
{showing && (
<ManageWorkspace hideModal={hideModal} providedSlug={workspace.slug} />
)}


@ -0,0 +1,4 @@
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect x="1.02539" y="1.43799" width="17.252" height="17.252" rx="2" stroke="white" stroke-opacity="1.0" stroke-width="1.5"/>
<path d="M6.70312 14.5408L12.5996 5.8056" stroke="white" stroke-opacity="1.0" stroke-width="1.5" stroke-linecap="round"/>
</svg>



@ -0,0 +1,68 @@
import { useEffect, useRef, useState } from "react";
import SlashCommandIcon from "./icons/slash-commands-icon.svg";
export default function SlashCommandsButton({ showing, setShowSlashCommand }) {
return (
<div
id="slash-cmd-btn"
onClick={() => setShowSlashCommand(!showing)}
className={`flex justify-center items-center opacity-60 hover:opacity-100 cursor-pointer ${
showing ? "!opacity-100" : ""
}`}
>
<img
src={SlashCommandIcon}
className="w-6 h-6 pointer-events-none"
alt="Slash commands button"
/>
</div>
);
}
export function SlashCommands({ showing, setShowing, sendCommand }) {
const cmdRef = useRef(null);
useEffect(() => {
function listenForOutsideClick() {
if (!showing || !cmdRef.current) return false;
document.addEventListener("click", closeIfOutside);
}
listenForOutsideClick();
}, [showing, cmdRef.current]);
if (!showing) return null;
const closeIfOutside = ({ target }) => {
if (target.id === "slash-cmd-btn") return;
const isOutside = !cmdRef?.current?.contains(target);
if (!isOutside) return;
setShowing(false);
};
return (
<div className="w-full flex justify-center absolute bottom-[130px] md:bottom-[150px] left-0 z-10 px-4">
<div
ref={cmdRef}
className="w-[600px] p-2 bg-zinc-800 rounded-2xl shadow flex-col justify-center items-start gap-2.5 inline-flex"
>
<button
onClick={() => {
setShowing(false);
sendCommand("/reset", true);
}}
className="w-full hover:cursor-pointer hover:bg-zinc-700 px-2 py-2 rounded-xl flex flex-col justify-start"
>
<div className="w-full flex-col text-left flex pointer-events-none">
<div className="text-white text-sm font-bold">/reset</div>
<div className="text-white text-opacity-60 text-sm">
Clear your chat history and begin a new chat
</div>
</div>
</button>
</div>
</div>
);
}
export function useSlashCommands() {
const [showSlashCommand, setShowSlashCommand] = useState(false);
return { showSlashCommand, setShowSlashCommand };
}


@ -11,6 +11,10 @@ import ManageWorkspace, {
useManageWorkspaceModal,
} from "../../../Modals/MangeWorkspace";
import useUser from "@/hooks/useUser";
import SlashCommandsButton, {
SlashCommands,
useSlashCommands,
} from "./SlashCommands";
export default function PromptInput({
workspace,
@ -19,7 +23,9 @@ export default function PromptInput({
onChange,
inputDisabled,
buttonDisabled,
sendCommand,
}) {
const { showSlashCommand, setShowSlashCommand } = useSlashCommands();
const { showing, showModal, hideModal } = useManageWorkspaceModal();
const formRef = useRef(null);
const [_, setFocused] = useState(false);
@ -49,7 +55,12 @@ export default function PromptInput({
};
return (
<div className="w-full fixed md:absolute bottom-0 left-0 z-10 md:z-0 flex justify-center items-center overflow-hidden">
<div className="w-full fixed md:absolute bottom-0 left-0 z-10 md:z-0 flex justify-center items-center">
<SlashCommands
showing={showSlashCommand}
setShowing={setShowSlashCommand}
sendCommand={sendCommand}
/>
<form
onSubmit={handleSubmit}
className="flex flex-col gap-y-1 rounded-t-lg md:w-3/4 w-full mx-auto max-w-xl"
@ -95,17 +106,12 @@ export default function PromptInput({
weight="fill"
/>
)}
<ChatModeSelector workspace={workspace} />
{/* <TextT
className="w-7 h-7 text-white/30 cursor-not-allowed"
weight="fill"
/> */}
<SlashCommandsButton
showing={showSlashCommand}
setShowSlashCommand={setShowSlashCommand}
/>
</div>
{/* <Microphone
className="w-7 h-7 text-white/30 cursor-not-allowed"
weight="fill"
/> */}
</div>
</div>
</div>


@ -10,7 +10,6 @@ export default function ChatContainer({ workspace, knownHistory = [] }) {
const [message, setMessage] = useState("");
const [loadingResponse, setLoadingResponse] = useState(false);
const [chatHistory, setChatHistory] = useState(knownHistory);
const handleMessageChange = (event) => {
setMessage(event.target.value);
};
@ -36,6 +35,30 @@ export default function ChatContainer({ workspace, knownHistory = [] }) {
setLoadingResponse(true);
};
const sendCommand = async (command, submit = false) => {
if (!command || command === "") return false;
if (!submit) {
setMessage(command);
return;
}
const prevChatHistory = [
...chatHistory,
{ content: command, role: "user" },
{
content: "",
role: "assistant",
pending: true,
userMessage: command,
animate: true,
},
];
setChatHistory(prevChatHistory);
setMessage("");
setLoadingResponse(true);
};
useEffect(() => {
async function fetchReply() {
const promptMessage =
@ -48,21 +71,6 @@ export default function ChatContainer({ workspace, knownHistory = [] }) {
return false;
}
// TODO: Delete this snippet once we have streaming stable.
// const chatResult = await Workspace.sendChat(
// workspace,
// promptMessage.userMessage,
// window.localStorage.getItem(`workspace_chat_mode_${workspace.slug}`) ??
// "chat",
// )
// handleChat(
// chatResult,
// setLoadingResponse,
// setChatHistory,
// remHistory,
// _chatHistory
// )
await Workspace.streamChat(
workspace,
promptMessage.userMessage,
@ -97,6 +105,7 @@ export default function ChatContainer({ workspace, knownHistory = [] }) {
onChange={handleMessageChange}
inputDisabled={loadingResponse}
buttonDisabled={loadingResponse}
sendCommand={sendCommand}
/>
</div>
</div>


@ -59,5 +59,41 @@ export default function WorkspaceChat({ loading, workspace }) {
);
}
setEventDelegatorForCodeSnippets();
return <ChatContainer workspace={workspace} knownHistory={history} />;
}
// Enables us to safely render markdown and sanitize all responses without risk of injection,
// while still being able to attach a copy handler to every element that is a code snippet.
function copyCodeSnippet(uuid) {
const target = document.querySelector(`[data-code="${uuid}"]`);
if (!target) return false;
const markdown =
target.parentElement?.parentElement?.querySelector(
"pre:first-of-type"
)?.innerText;
if (!markdown) return false;
window.navigator.clipboard.writeText(markdown);
target.classList.add("text-green-500");
const originalText = target.innerHTML;
target.innerText = "Copied!";
target.setAttribute("disabled", true);
setTimeout(() => {
target.classList.remove("text-green-500");
target.innerHTML = originalText;
target.removeAttribute("disabled");
}, 2500);
}
// Listens and hunts for all data-code-snippet clicks.
function setEventDelegatorForCodeSnippets() {
document?.addEventListener("click", function (e) {
const target = e.target.closest("[data-code-snippet]");
const uuidCode = target?.dataset?.code;
if (!uuidCode) return false;
copyCodeSnippet(uuidCode);
});
}


@ -0,0 +1,15 @@
import { useState } from "react";
export default function useCopyText(delay = 2500) {
const [copied, setCopied] = useState(false);
const copyText = async (content) => {
if (!content) return;
navigator?.clipboard?.writeText(content);
setCopied(content);
setTimeout(() => {
setCopied(false);
}, delay);
};
return { copyText, copied };
}


@ -6,8 +6,18 @@ html,
body {
padding: 0;
margin: 0;
font-family: "plus-jakarta-sans", -apple-system, BlinkMacSystemFont, Segoe UI,
Roboto, Oxygen, Ubuntu, Cantarell, Fira Sans, Droid Sans, Helvetica Neue,
font-family:
"plus-jakarta-sans",
-apple-system,
BlinkMacSystemFont,
Segoe UI,
Roboto,
Oxygen,
Ubuntu,
Cantarell,
Fira Sans,
Droid Sans,
Helvetica Neue,
sans-serif;
background-color: white;
}
@ -389,3 +399,7 @@ dialog::backdrop {
.rti--container {
@apply !bg-zinc-900 !text-white !placeholder-white !placeholder-opacity-60 !text-sm !rounded-lg !p-2.5;
}
.tooltip {
@apply !bg-black !text-white !py-2 !px-3 !rounded-md;
}

Binary file not shown (new image, 4.4 KiB).

Binary file not shown (new image, 8.2 KiB).

Binary file not shown (new image, 4.2 KiB).

Binary file not shown (new image, 14 KiB).

View File

@ -1,4 +1,4 @@
import { API_BASE, AUTH_TIMESTAMP } from "@/utils/constants";
import { API_BASE, AUTH_TIMESTAMP, fullApiUrl } from "@/utils/constants";
import { baseHeaders } from "@/utils/request";
import DataConnector from "./dataConnector";
@ -9,8 +9,10 @@ const System = {
.then((res) => res?.online || false)
.catch(() => false);
},
totalIndexes: async function () {
return await fetch(`${API_BASE}/system/system-vectors`, {
totalIndexes: async function (slug = null) {
const url = new URL(`${fullApiUrl()}/system/system-vectors`);
if (!!slug) url.searchParams.append("slug", encodeURIComponent(slug));
return await fetch(url.toString(), {
headers: baseHeaders(),
})
.then((res) => {
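The switch to the WHATWG URL constructor here is what motivates the new fullApiUrl() helper: new URL() throws on a relative base such as "/api", so the origin must be resolved into an absolute URL before query params can be appended. Hedged call sketches for the updated method (the slug value is hypothetical):

// Counts vectors across all namespaces:
await System.totalIndexes();
// Counts vectors for one workspace; sends ?slug=my-workspace:
await System.totalIndexes("my-workspace");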

View File

@ -168,22 +168,6 @@ const Workspace = {
const data = await response.json();
return { response, data };
},
// TODO: Deprecated and should be removed from frontend.
sendChat: async function ({ slug }, message, mode = "query") {
const chatResult = await fetch(`${API_BASE}/workspace/${slug}/chat`, {
method: "POST",
body: JSON.stringify({ message, mode }),
headers: baseHeaders(),
})
.then((res) => res.json())
.catch((e) => {
console.error(e);
return null;
});
return chatResult;
},
};
export default Workspace;

View File

@ -11,6 +11,8 @@ import GeminiLogo from "@/media/llmprovider/gemini.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
import MistralLogo from "@/media/llmprovider/mistral.jpeg";
import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
@ -20,6 +22,8 @@ import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
import MistralOptions from "@/components/LLMSelection/MistralOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import { MagnifyingGlass } from "@phosphor-icons/react";
@ -28,19 +32,17 @@ export default function GeneralLLMPreference() {
const [hasChanges, setHasChanges] = useState(false);
const [settings, setSettings] = useState(null);
const [loading, setLoading] = useState(true);
const [searchQuery, setSearchQuery] = useState("");
const [filteredLLMs, setFilteredLLMs] = useState([]);
const [selectedLLM, setSelectedLLM] = useState(null);
const isHosted = window.location.hostname.includes("useanything.com");
const handleSubmit = async (e) => {
e.preventDefault();
const form = e.target;
const data = {};
const data = { LLMProvider: selectedLLM };
const formData = new FormData(form);
data.LLMProvider = selectedLLM;
for (var [key, value] of formData.entries()) data[key] = value;
const { error } = await System.updateSystem(data);
setSaving(true);
@ -127,6 +129,20 @@ export default function GeneralLLMPreference() {
options: <LocalAiOptions settings={settings} />,
description: "Run LLMs locally on your own machine.",
},
{
name: "Together AI",
value: "togetherai",
logo: TogetherAILogo,
options: <TogetherAiOptions settings={settings} />,
description: "Run open source models from Together AI.",
},
{
name: "Mistral",
value: "mistral",
logo: MistralLogo,
options: <MistralOptions settings={settings} />,
description: "Run open source models from Mistral AI.",
},
{
name: "Native",
value: "native",

View File

@ -8,6 +8,8 @@ import PineconeLogo from "@/media/vectordbs/pinecone.png";
import LanceDbLogo from "@/media/vectordbs/lancedb.png";
import WeaviateLogo from "@/media/vectordbs/weaviate.png";
import QDrantLogo from "@/media/vectordbs/qdrant.png";
import MilvusLogo from "@/media/vectordbs/milvus.png";
import ZillizLogo from "@/media/vectordbs/zilliz.png";
import PreLoader from "@/components/Preloader";
import ChangeWarningModal from "@/components/ChangeWarning";
import { MagnifyingGlass } from "@phosphor-icons/react";
@ -17,6 +19,8 @@ import PineconeDBOptions from "@/components/VectorDBSelection/PineconeDBOptions"
import QDrantDBOptions from "@/components/VectorDBSelection/QDrantDBOptions";
import WeaviateDBOptions from "@/components/VectorDBSelection/WeaviateDBOptions";
import VectorDBItem from "@/components/VectorDBSelection/VectorDBItem";
import MilvusDBOptions from "@/components/VectorDBSelection/MilvusDBOptions";
import ZillizCloudOptions from "@/components/VectorDBSelection/ZillizCloudOptions";
export default function GeneralVectorDatabase() {
const [saving, setSaving] = useState(false);
@ -31,7 +35,6 @@ export default function GeneralVectorDatabase() {
useEffect(() => {
async function fetchKeys() {
const _settings = await System.keys();
console.log(_settings);
setSettings(_settings);
setSelectedVDB(_settings?.VectorDB || "lancedb");
setHasEmbeddings(_settings?.HasExistingEmbeddings || false);
@ -64,6 +67,14 @@ export default function GeneralVectorDatabase() {
options: <PineconeDBOptions settings={settings} />,
description: "100% cloud-based vector database for enterprise use cases.",
},
{
name: "Zilliz Cloud",
value: "zilliz",
logo: ZillizLogo,
options: <ZillizCloudOptions settings={settings} />,
description:
"Cloud hosted vector database built for enterprise with SOC 2 compliance.",
},
{
name: "QDrant",
value: "qdrant",
@ -79,6 +90,13 @@ export default function GeneralVectorDatabase() {
description:
"Open source local and cloud hosted multi-modal vector database.",
},
{
name: "Milvus",
value: "milvus",
logo: MilvusLogo,
options: <MilvusDBOptions settings={settings} />,
description: "Open-source, highly scalable, and blazing fast.",
},
];
const updateVectorChoice = (selection) => {

View File

@ -1,9 +1,13 @@
import React from "react";
import PasswordModal, { usePasswordModal } from "@/components/Modals/Password";
import { FullScreenLoader } from "@/components/Preloader";
import { Navigate } from "react-router-dom";
import paths from "@/utils/paths";
export default function Login() {
const { loading, mode } = usePasswordModal();
const { loading, requiresAuth, mode } = usePasswordModal();
if (loading) return <FullScreenLoader />;
if (requiresAuth === false) return <Navigate to={paths.home()} />;
return <PasswordModal mode={mode} />;
}

View File

@ -6,13 +6,17 @@ import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import AnthropicLogo from "@/media/llmprovider/anthropic.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import MistralLogo from "@/media/llmprovider/mistral.jpeg";
import ZillizLogo from "@/media/vectordbs/zilliz.png";
import ChromaLogo from "@/media/vectordbs/chroma.png";
import PineconeLogo from "@/media/vectordbs/pinecone.png";
import LanceDbLogo from "@/media/vectordbs/lancedb.png";
import WeaviateLogo from "@/media/vectordbs/weaviate.png";
import QDrantLogo from "@/media/vectordbs/qdrant.png";
import MilvusLogo from "@/media/vectordbs/milvus.png";
import React, { useState, useEffect } from "react";
import paths from "@/utils/paths";
import { useNavigate } from "react-router-dom";
@ -25,7 +29,7 @@ const LLM_SELECTION_PRIVACY = {
name: "OpenAI",
description: [
"Your chats will not be used for training",
"Your prompts and document text used in responses are visible to OpenAI",
"Your prompts and document text used in response creation are visible to OpenAI",
],
logo: OpenAiLogo,
},
@ -41,7 +45,7 @@ const LLM_SELECTION_PRIVACY = {
name: "Anthropic",
description: [
"Your chats will not be used for training",
"Your prompts and document text used in responses are visible to Anthropic",
"Your prompts and document text used in response creation are visible to Anthropic",
],
logo: AnthropicLogo,
},
@ -49,7 +53,7 @@ const LLM_SELECTION_PRIVACY = {
name: "Google Gemini",
description: [
"Your chats are de-identified and used in training",
"Your prompts and document text are visible in responses to Google",
"Your prompts and document text used in response creation are visible to Google",
],
logo: GeminiLogo,
},
@ -81,6 +85,21 @@ const LLM_SELECTION_PRIVACY = {
],
logo: AnythingLLMIcon,
},
togetherai: {
name: "TogetherAI",
description: [
"Your chats will not be used for training",
"Your prompts and document text used in response creation are visible to TogetherAI",
],
logo: TogetherAILogo,
},
mistral: {
name: "Mistral",
description: [
"Your prompts and document text used in response creation are visible to Mistral",
],
logo: MistralLogo,
},
};
const VECTOR_DB_PRIVACY = {
@ -114,6 +133,20 @@ const VECTOR_DB_PRIVACY = {
],
logo: WeaviateLogo,
},
milvus: {
name: "Milvus",
description: [
"Your vectors and document text are stored on your Milvus instance (cloud or self-hosted)",
],
logo: MilvusLogo,
},
zilliz: {
name: "Zilliz Cloud",
description: [
"Your vectors and document text are stored on your Zilliz cloud cluster.",
],
logo: ZillizLogo,
},
lancedb: {
name: "LanceDB",
description: [

View File

@ -7,7 +7,9 @@ import GeminiLogo from "@/media/llmprovider/gemini.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
import MistralLogo from "@/media/llmprovider/mistral.jpeg";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
@ -16,11 +18,13 @@ import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
import MistralOptions from "@/components/LLMSelection/MistralOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import System from "@/models/system";
import paths from "@/utils/paths";
import showToast from "@/utils/toast";
import { useNavigate } from "react-router-dom";
import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
const TITLE = "LLM Preference";
const DESCRIPTION =
@ -100,6 +104,20 @@ export default function LLMPreference({
options: <LocalAiOptions settings={settings} />,
description: "Run LLMs locally on your own machine.",
},
{
name: "Together AI",
value: "togetherai",
logo: TogetherAILogo,
options: <TogetherAiOptions settings={settings} />,
description: "Run open source models from Together AI.",
},
{
name: "Mistral",
value: "mistral",
logo: MistralLogo,
options: <MistralOptions settings={settings} />,
description: "Run open source models from Mistral AI.",
},
{
name: "Native",
value: "native",

View File

@ -5,6 +5,8 @@ import PineconeLogo from "@/media/vectordbs/pinecone.png";
import LanceDbLogo from "@/media/vectordbs/lancedb.png";
import WeaviateLogo from "@/media/vectordbs/weaviate.png";
import QDrantLogo from "@/media/vectordbs/qdrant.png";
import MilvusLogo from "@/media/vectordbs/milvus.png";
import ZillizLogo from "@/media/vectordbs/zilliz.png";
import System from "@/models/system";
import paths from "@/utils/paths";
import PineconeDBOptions from "@/components/VectorDBSelection/PineconeDBOptions";
@ -12,6 +14,8 @@ import ChromaDBOptions from "@/components/VectorDBSelection/ChromaDBOptions";
import QDrantDBOptions from "@/components/VectorDBSelection/QDrantDBOptions";
import WeaviateDBOptions from "@/components/VectorDBSelection/WeaviateDBOptions";
import LanceDBOptions from "@/components/VectorDBSelection/LanceDBOptions";
import MilvusOptions from "@/components/VectorDBSelection/MilvusDBOptions";
import ZillizCloudOptions from "@/components/VectorDBSelection/ZillizCloudOptions";
import showToast from "@/utils/toast";
import { useNavigate } from "react-router-dom";
import VectorDBItem from "@/components/VectorDBSelection/VectorDBItem";
@ -66,6 +70,14 @@ export default function VectorDatabaseConnection({
options: <PineconeDBOptions settings={settings} />,
description: "100% cloud-based vector database for enterprise use cases.",
},
{
name: "Zilliz Cloud",
value: "zilliz",
logo: ZillizLogo,
options: <ZillizCloudOptions settings={settings} />,
description:
"Cloud hosted vector database built for enterprise with SOC 2 compliance.",
},
{
name: "QDrant",
value: "qdrant",
@ -81,6 +93,13 @@ export default function VectorDatabaseConnection({
description:
"Open source local and cloud hosted multi-modal vector database.",
},
{
name: "Milvus",
value: "milvus",
logo: MilvusLogo,
options: <MilvusOptions settings={settings} />,
description: "Open-source, highly scalable, and blazing fast.",
},
];
function handleForward() {

View File

@ -1,16 +1,26 @@
import { ArrowLeft, ArrowRight } from "@phosphor-icons/react";
import { lazy, useState } from "react";
import { useState } from "react";
import { isMobile } from "react-device-detect";
import Home from "./Home";
import LLMPreference from "./LLMPreference";
import EmbeddingPreference from "./EmbeddingPreference";
import VectorDatabaseConnection from "./VectorDatabaseConnection";
import CustomLogo from "./CustomLogo";
import UserSetup from "./UserSetup";
import DataHandling from "./DataHandling";
import Survey from "./Survey";
import CreateWorkspace from "./CreateWorkspace";
const OnboardingSteps = {
home: lazy(() => import("./Home")),
"llm-preference": lazy(() => import("./LLMPreference")),
"embedding-preference": lazy(() => import("./EmbeddingPreference")),
"vector-database": lazy(() => import("./VectorDatabaseConnection")),
"custom-logo": lazy(() => import("./CustomLogo")),
"user-setup": lazy(() => import("./UserSetup")),
"data-handling": lazy(() => import("./DataHandling")),
survey: lazy(() => import("./Survey")),
"create-workspace": lazy(() => import("./CreateWorkspace")),
home: Home,
"llm-preference": LLMPreference,
"embedding-preference": EmbeddingPreference,
"vector-database": VectorDatabaseConnection,
"custom-logo": CustomLogo,
"user-setup": UserSetup,
"data-handling": DataHandling,
survey: Survey,
"create-workspace": CreateWorkspace,
};
export default OnboardingSteps;
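With the switch from lazy() to static imports, each key now maps directly to a component, so a consumer can look steps up and render them without a Suspense boundary. A simplified, illustrative sketch of that lookup (the wrapper component is not part of this commit):

function OnboardingStep({ step = "home", ...props }) {
  // Direct lookup; no <Suspense> fallback is needed now that the step
  // components are statically imported.
  const StepComponent = OnboardingSteps[step] ?? OnboardingSteps.home;
  return <StepComponent {...props} />;
}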

View File

@ -7,47 +7,44 @@ import { v4 } from "uuid";
const markdown = markdownIt({
html: true,
typographer: true,
highlight: function (str, lang) {
highlight: function (code, lang) {
const uuid = v4();
if (lang && hljs.getLanguage(lang)) {
try {
return (
`<div class="whitespace-pre-line w-full rounded-lg bg-black-900 px-4 pt-10 pb-4 relative font-mono font-normal text-sm text-slate-200"><div class="w-full flex items-center absolute top-0 left-0 text-slate-200 bg-stone-800 px-4 py-2 text-xs font-sans justify-between rounded-t-md"><button id="code-${uuid}" onclick='window.copySnippet("${uuid}");' class="flex ml-auto gap-2"><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" class="h-4 w-4" height="1em" width="1em" xmlns="http://www.w3.org/2000/svg"><path d="M16 4h2a2 2 0 0 1 2 2v14a2 2 0 0 1-2 2H6a2 2 0 0 1-2-2V6a2 2 0 0 1 2-2h2"></path><rect x="8" y="2" width="8" height="4" rx="1" ry="1"></rect></svg>Copy code</button></div><pre class="whitespace-pre-wrap">` +
hljs.highlight(lang, str, true).value +
`<div class="whitespace-pre-line w-full rounded-lg bg-black-900 px-4 pb-4 relative font-mono font-normal text-sm text-slate-200">
<div class="w-full flex items-center absolute top-0 left-0 text-slate-200 bg-stone-800 px-4 py-2 text-xs font-sans justify-between rounded-t-md">
<div class="flex gap-2">
<code class="text-xs">${lang || ""}</code>
</div>
<button data-code-snippet data-code="code-${uuid}" class="flex items-center gap-x-2">
<svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" class="h-4 w-4" height="1em" width="1em" xmlns="http://www.w3.org/2000/svg"><path d="M16 4h2a2 2 0 0 1 2 2v14a2 2 0 0 1-2 2H6a2 2 0 0 1-2-2V6a2 2 0 0 1 2-2h2"></path><rect x="8" y="2" width="8" height="4" rx="1" ry="1"></rect></svg>
<p>Copy code</p>
</button>
</div>
<pre class="whitespace-pre-wrap">` +
hljs.highlight(code, { language: lang, ignoreIllegals: true }).value +
"</pre></div>"
);
} catch (__) {}
}
return (
`<div class="whitespace-pre-line w-full rounded-lg bg-black-900 px-4 pt-10 pb-4 relative font-mono font-normal text-sm text-slate-200"><div class="w-full flex items-center absolute top-0 left-0 text-slate-200 bg-stone-800 px-4 py-2 text-xs font-sans justify-between rounded-t-md"><button id="code-${uuid}" onclick='window.copySnippet("${uuid}");' class="flex ml-auto gap-2"><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" class="h-4 w-4" height="1em" width="1em" xmlns="http://www.w3.org/2000/svg"><path d="M16 4h2a2 2 0 0 1 2 2v14a2 2 0 0 1-2 2H6a2 2 0 0 1-2-2V6a2 2 0 0 1 2-2h2"></path><rect x="8" y="2" width="8" height="4" rx="1" ry="1"></rect></svg>Copy code</button></div><pre class="whitespace-pre-wrap">` +
HTMLEncode(str) +
`<div class="whitespace-pre-line w-full rounded-lg bg-black-900 px-4 pb-4 relative font-mono font-normal text-sm text-slate-200">
<div class="w-full flex items-center absolute top-0 left-0 text-slate-200 bg-stone-800 px-4 py-2 text-xs font-sans justify-between rounded-t-md">
<div class="flex gap-2"><code class="text-xs"></code></div>
<button data-code-snippet data-code="code-${uuid}" class="flex items-center gap-x-2">
<svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" class="h-4 w-4" height="1em" width="1em" xmlns="http://www.w3.org/2000/svg"><path d="M16 4h2a2 2 0 0 1 2 2v14a2 2 0 0 1-2 2H6a2 2 0 0 1-2-2V6a2 2 0 0 1 2-2h2"></path><rect x="8" y="2" width="8" height="4" rx="1" ry="1"></rect></svg>
<p>Copy code</p>
</button>
</div>
<pre class="whitespace-pre-wrap">` +
HTMLEncode(code) +
"</pre></div>"
);
},
});
window.copySnippet = function (uuid = "") {
const target = document.getElementById(`code-${uuid}`);
const markdown =
target.parentElement?.parentElement?.querySelector(
"pre:first-of-type"
)?.innerText;
if (!markdown) return false;
window.navigator.clipboard.writeText(markdown);
target.classList.add("text-green-500");
const originalText = target.innerHTML;
target.innerText = "Copied!";
target.setAttribute("disabled", true);
setTimeout(() => {
target.classList.remove("text-green-500");
target.innerHTML = originalText;
target.removeAttribute("disabled");
}, 5000);
};
export default function renderMarkdown(text = "") {
return markdown.render(text);
}
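A hedged usage sketch: fenced blocks with a language hljs recognizes come back wrapped in the copy-button scaffold above, while unknown languages fall through to the HTML-encoded branch. The dompurify dependency added elsewhere in this commit suggests the resulting HTML is sanitized before being injected into the DOM.

// Illustrative call; the returned string contains the wrapper div, a
// "Copy code" button carrying data-code-snippet / data-code="code-<uuid>",
// and the highlighted <pre> block.
const html = renderMarkdown("```js\nconsole.log('hello');\n```");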

View File

@ -7,3 +7,8 @@ export const COMPLETE_QUESTIONNAIRE = "anythingllm_completed_questionnaire";
export const USER_BACKGROUND_COLOR = "bg-historical-msg-user";
export const AI_BACKGROUND_COLOR = "bg-historical-msg-system";
export function fullApiUrl() {
if (API_BASE !== "/api") return API_BASE;
return `${window.location.origin}/api`;
}
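For reference, a sketch of how the helper resolves in the two configurations (values illustrative). Either way the result is absolute, which is what makes it safe to hand to the URL constructor:

// Built with an absolute API base:
//   API_BASE = "http://localhost:3001/api"  ->  fullApiUrl() === API_BASE
// Served same-origin with a relative base:
//   API_BASE = "/api"  ->  fullApiUrl() === `${window.location.origin}/api`
const url = new URL(`${fullApiUrl()}/system/system-vectors`);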

View File

@ -365,6 +365,26 @@
resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.53.0.tgz#bea56f2ed2b5baea164348ff4d5a879f6f81f20d"
integrity sha512-Kn7K8dx/5U6+cT1yEhpX1w4PCSg0M+XyRILPgvwcEBjerFWCwQj5sbr3/VmxqV0JGHCBCzyd6LxypEuehypY1w==
"@floating-ui/core@^1.5.3":
version "1.5.3"
resolved "https://registry.yarnpkg.com/@floating-ui/core/-/core-1.5.3.tgz#b6aa0827708d70971c8679a16cf680a515b8a52a"
integrity sha512-O0WKDOo0yhJuugCx6trZQj5jVJ9yR0ystG2JaNAemYUWce+pmM6WUEFIibnWyEJKdrDxhm75NoSRME35FNaM/Q==
dependencies:
"@floating-ui/utils" "^0.2.0"
"@floating-ui/dom@^1.0.0":
version "1.5.4"
resolved "https://registry.yarnpkg.com/@floating-ui/dom/-/dom-1.5.4.tgz#28df1e1cb373884224a463235c218dcbd81a16bb"
integrity sha512-jByEsHIY+eEdCjnTVu+E3ephzTOzkQ8hgUfGwos+bg7NlH33Zc5uO+QHz1mrQUOgIKKDD1RtS201P9NvAfq3XQ==
dependencies:
"@floating-ui/core" "^1.5.3"
"@floating-ui/utils" "^0.2.0"
"@floating-ui/utils@^0.2.0":
version "0.2.1"
resolved "https://registry.yarnpkg.com/@floating-ui/utils/-/utils-0.2.1.tgz#16308cea045f0fc777b6ff20a9f25474dd8293d2"
integrity sha512-9TANp6GPoMtYzQdt54kfAyMmz1+osLlXdg2ENroU7zzrtflTLrrC/lgrIfaSe+Wu0b89GKccT7vxXA0MoAIO+Q==
"@humanwhocodes/config-array@^0.11.13":
version "0.11.13"
resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.13.tgz#075dc9684f40a531d9b26b0822153c1e832ee297"
@ -846,6 +866,11 @@ chokidar@^3.5.3:
optionalDependencies:
fsevents "~2.3.2"
classnames@^2.3.0:
version "2.5.1"
resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.5.1.tgz#ba774c614be0f016da105c858e7159eae8e7687b"
integrity sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==
cliui@^8.0.1:
version "8.0.1"
resolved "https://registry.yarnpkg.com/cliui/-/cliui-8.0.1.tgz#0c04b075db02cbfe60dc8e6cf2f5486b1a3608aa"
@ -1021,6 +1046,11 @@ doctrine@^3.0.0:
dependencies:
esutils "^2.0.2"
dompurify@^3.0.8:
version "3.0.8"
resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-3.0.8.tgz#e0021ab1b09184bc8af7e35c7dd9063f43a8a437"
integrity sha512-b7uwreMYL2eZhrSCRC4ahLTeZcPZxSmYfmcQGXGkXiZSNW1X85v+SDM5KsWcpivIiUBH47Ji7NtyUdpLeF5JZQ==
electron-to-chromium@^1.4.535:
version "1.4.576"
resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.576.tgz#0c6940fdc0d60f7e34bd742b29d8fa847c9294d1"
@ -2538,6 +2568,14 @@ react-toastify@^9.1.3:
dependencies:
clsx "^1.1.1"
react-tooltip@^5.25.2:
version "5.25.2"
resolved "https://registry.yarnpkg.com/react-tooltip/-/react-tooltip-5.25.2.tgz#efb51845ec2e863045812ad1dc1927573922d629"
integrity sha512-MwZ3S9xcHpojZaKqjr5mTs0yp/YBPpKFcayY7MaaIIBr2QskkeeyelpY2YdGLxIMyEj4sxl0rGoK6dQIKvNLlw==
dependencies:
"@floating-ui/dom" "^1.0.0"
classnames "^2.3.0"
react@^18.2.0:
version "18.2.0"
resolved "https://registry.yarnpkg.com/react/-/react-18.2.0.tgz#555bd98592883255fa00de14f1151a917b5d77d5"

36
pull_request_template.md Normal file
View File

@ -0,0 +1,36 @@
### Pull Request Type
<!-- For change type, change [ ] to [x]. -->
- [ ] ✨ feat
- [ ] 🐛 fix
- [ ] ♻️ refactor
- [ ] 💄 style
- [ ] 🔨 chore
- [ ] 📝 docs
### Relevant Issues
<!-- Use "resolves #xxx" to auto resolve on merge. Otherwise, please use "connect #xxx" -->
resolves #xxx
### What is in this change?
Describe the changes in this PR that are impactful to the repo.
### Additional Information
Add any other context about the Pull Request here that was not captured above.
### Developer Validations
<!-- All of the applicable items should be checked. -->
- [ ] I ran `yarn lint` from the root of the repo & committed changes
- [ ] Relevant documentation has been updated
- [ ] I have tested my code functionality
- [ ] Docker build succeeds locally

View File

@ -37,6 +37,14 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
# OLLAMA_MODEL_PREF='llama2'
# OLLAMA_MODEL_TOKEN_LIMIT=4096
# LLM_PROVIDER='togetherai'
# TOGETHER_AI_API_KEY='my-together-ai-key'
# TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'
# LLM_PROVIDER='mistral'
# MISTRAL_API_KEY='example-mistral-ai-api-key'
# MISTRAL_MODEL_PREF='mistral-tiny'
###########################################
######## Embedding API SELECTION ##########
###########################################
@ -82,6 +90,16 @@ VECTOR_DB="lancedb"
# QDRANT_ENDPOINT="http://localhost:6333"
# QDRANT_API_KEY=
# Enable all below if you are using vector database: Milvus.
# VECTOR_DB="milvus"
# MILVUS_ADDRESS="http://localhost:19530"
# MILVUS_USERNAME=
# MILVUS_PASSWORD=
# Enable all below if you are using vector database: Zilliz Cloud.
# VECTOR_DB="zilliz"
# ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com"
# ZILLIZ_API_TOKEN=api-token-here
# CLOUD DEPLOYMENT VARIABLES ONLY
# AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting.

1
server/.gitignore vendored
View File

@ -3,6 +3,7 @@
storage/assets/*
!storage/assets/anything-llm.png
storage/documents/*
storage/tmp/*
storage/vector-cache/*.json
storage/exports
storage/imports

View File

@ -5,8 +5,13 @@ const {
checkProcessorAlive,
acceptedFileTypes,
processDocument,
processLink,
} = require("../../../utils/files/documentProcessor");
const { viewLocalFiles } = require("../../../utils/files");
const {
viewLocalFiles,
findDocumentInDocuments,
} = require("../../../utils/files");
const { reqBody } = require("../../../utils/http");
const { handleUploads } = setupMulter();
function apiDocumentEndpoints(app) {
@ -20,7 +25,6 @@ function apiDocumentEndpoints(app) {
/*
#swagger.tags = ['Documents']
#swagger.description = 'Upload a new file to AnythingLLM to be parsed and prepared for embedding.'
#swagger.requestBody = {
description: 'File to be uploaded.',
required: true,
@ -47,6 +51,21 @@ function apiDocumentEndpoints(app) {
example: {
success: true,
error: null,
documents: [
{
"location": "custom-documents/anythingllm.txt-6e8be64c-c162-4b43-9997-b068c0071e8b.json",
"name": "anythingllm.txt-6e8be64c-c162-4b43-9997-b068c0071e8b.json",
"url": "file:///Users/tim/Documents/anything-llm/collector/hotdir/anythingllm.txt",
"title": "anythingllm.txt",
"docAuthor": "Unknown",
"description": "Unknown",
"docSource": "a text file uploaded by the user.",
"chunkSource": "anythingllm.txt",
"published": "1/16/2024, 3:07:00PM",
"wordCount": 93,
"token_count_estimate": 115,
}
]
}
}
}
@ -72,16 +91,113 @@ function apiDocumentEndpoints(app) {
.end();
}
const { success, reason } = await processDocument(originalname);
const { success, reason, documents } =
await processDocument(originalname);
if (!success) {
response.status(500).json({ success: false, error: reason }).end();
response
.status(500)
.json({ success: false, error: reason, documents })
.end();
return;
}
console.log(
`Document ${originalname} uploaded and processed successfully. It is now available in documents.`
);
await Telemetry.sendTelemetry("document_uploaded");
response.status(200).json({ success: true, error: null });
response.status(200).json({ success: true, error: null, documents });
} catch (e) {
console.log(e.message, e);
response.sendStatus(500).end();
}
}
);
app.post(
"/v1/document/upload-link",
[validApiKey],
async (request, response) => {
/*
#swagger.tags = ['Documents']
#swagger.description = 'Upload a valid URL for AnythingLLM to scrape and prepare for embedding.'
#swagger.requestBody = {
description: 'Link to a web address to be scraped.',
required: true,
type: 'object',
content: {
"application/json": {
schema: {
type: 'object',
example: {
"link": "https://useanything.com"
}
}
}
}
}
#swagger.responses[200] = {
content: {
"application/json": {
schema: {
type: 'object',
example: {
success: true,
error: null,
documents: [
{
"id": "c530dbe6-bff1-4b9e-b87f-710d539d20bc",
"url": "file://useanything_com.html",
"title": "useanything_com.html",
"docAuthor": "no author found",
"description": "No description found.",
"docSource": "URL link uploaded by the user.",
"chunkSource": "https:useanything.com.html",
"published": "1/16/2024, 3:46:33PM",
"wordCount": 252,
"pageContent": "AnythingLLM is the best....",
"token_count_estimate": 447,
"location": "custom-documents/url-useanything_com-c530dbe6-bff1-4b9e-b87f-710d539d20bc.json"
}
]
}
}
}
}
}
#swagger.responses[403] = {
schema: {
"$ref": "#/definitions/InvalidAPIKey"
}
}
*/
try {
const { link } = reqBody(request);
const processingOnline = await checkProcessorAlive();
if (!processingOnline) {
response
.status(500)
.json({
success: false,
error: `Document processing API is not online. Link ${link} will not be processed automatically.`,
})
.end();
return; // without this return, the link would still be passed to processLink below
}
const { success, reason, documents } = await processLink(link);
if (!success) {
response
.status(500)
.json({ success: false, error: reason, documents })
.end();
return;
}
console.log(
`Link ${link} uploaded and processed successfully. It is now available in documents.`
);
await Telemetry.sendTelemetry("document_uploaded");
response.status(200).json({ success: true, error: null, documents });
} catch (e) {
console.log(e.message, e);
response.sendStatus(500).end();
@ -133,6 +249,61 @@ function apiDocumentEndpoints(app) {
}
});
app.get("/v1/document/:docName", [validApiKey], async (request, response) => {
/*
#swagger.tags = ['Documents']
#swagger.description = 'Get a single document by its unique AnythingLLM document name'
#swagger.parameters['docName'] = {
in: 'path',
description: 'Unique document name to find (name in /documents)',
required: true,
type: 'string'
}
#swagger.responses[200] = {
content: {
"application/json": {
schema: {
type: 'object',
example: {
"localFiles": {
"name": "documents",
"type": "folder",
items: [
{
"name": "my-stored-document.txt-uuid1234.json",
"type": "file",
"id": "bb07c334-4dab-4419-9462-9d00065a49a1",
"url": "file://my-stored-document.txt",
"title": "my-stored-document.txt",
"cached": false
},
]
}
}
}
}
}
}
#swagger.responses[403] = {
schema: {
"$ref": "#/definitions/InvalidAPIKey"
}
}
*/
try {
const { docName } = request.params;
const document = await findDocumentInDocuments(docName);
if (!document) {
response.sendStatus(404).end();
return;
}
response.status(200).json({ document });
} catch (e) {
console.log(e.message, e);
response.sendStatus(500).end();
}
});
app.get(
"/v1/document/accepted-file-types",
[validApiKey],

View File

@ -139,7 +139,7 @@ function apiSystemEndpoints(app) {
*/
try {
const body = reqBody(request);
const { newValues, error } = updateENV(body);
const { newValues, error } = await updateENV(body);
if (process.env.NODE_ENV === "production") await dumpENV();
response.status(200).json({ newValues, error });
} catch (e) {

View File

@ -11,6 +11,11 @@ const {
const { getVectorDbClass } = require("../../../utils/helpers");
const { multiUserMode, reqBody } = require("../../../utils/http");
const { validApiKey } = require("../../../utils/middleware/validApiKey");
const {
streamChatWithWorkspace,
writeResponseChunk,
VALID_CHAT_MODE,
} = require("../../../utils/chats/stream");
function apiWorkspaceEndpoints(app) {
if (!app) return;
@ -196,10 +201,11 @@ function apiWorkspaceEndpoints(app) {
return;
}
await WorkspaceChats.delete({ workspaceId: Number(workspace.id) });
await DocumentVectors.deleteForWorkspace(Number(workspace.id));
await Document.delete({ workspaceId: Number(workspace.id) });
await Workspace.delete({ id: Number(workspace.id) });
const workspaceId = Number(workspace.id);
await WorkspaceChats.delete({ workspaceId: workspaceId });
await DocumentVectors.deleteForWorkspace(workspaceId);
await Document.delete({ workspaceId: workspaceId });
await Workspace.delete({ id: workspaceId });
try {
await VectorDb["delete-namespace"]({ namespace: slug });
} catch (e) {
@ -375,8 +381,8 @@ function apiWorkspaceEndpoints(app) {
content: {
"application/json": {
example: {
adds: [],
deletes: ["custom-documents/anythingllm-hash.json"]
adds: ["custom-documents/my-pdf.pdf-hash.json"],
deletes: ["custom-documents/anythingllm.txt-hash.json"]
}
}
}
@ -441,7 +447,7 @@ function apiWorkspaceEndpoints(app) {
#swagger.tags = ['Workspaces']
#swagger.description = 'Execute a chat with a workspace'
#swagger.requestBody = {
description: 'prompt to send to the workspace and the type of conversation (query or chat).',
description: 'Send a prompt to the workspace and the type of conversation (query or chat).<br/><b>Query:</b> Will not use LLM unless there are relevant sources from vectorDB & does not recall chat history.<br/><b>Chat:</b> Uses LLM general knowledge w/custom embeddings to produce output, uses rolling chat history.',
required: true,
type: 'object',
content: {
@ -482,7 +488,28 @@ function apiWorkspaceEndpoints(app) {
const workspace = await Workspace.get({ slug });
if (!workspace) {
response.sendStatus(400).end();
response.status(400).json({
id: uuidv4(),
type: "abort",
textResponse: null,
sources: [],
close: true,
error: `Workspace ${slug} is not a valid workspace.`,
});
return;
}
if (!message?.length || !VALID_CHAT_MODE.includes(mode)) {
response.status(400).json({
id: uuidv4(),
type: "abort",
textResponse: null,
sources: [],
close: true,
error: !message?.length
? "message parameter cannot be empty."
: `${mode} is not a valid mode.`,
});
return;
}
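A hedged request sketch that satisfies the new validation (slug, key, and auth scheme are placeholders): mode must be one of VALID_CHAT_MODE, which the Swagger examples show as "query" or "chat", and an empty message or unknown mode now yields the structured 400 "abort" payload instead of a bare status code.

const res = await fetch("/api/v1/workspace/my-workspace/chat", {
  method: "POST",
  headers: {
    Authorization: "Bearer MY-API-KEY", // placeholder API key
    "Content-Type": "application/json",
  },
  body: JSON.stringify({ message: "What is AnythingLLM?", mode: "chat" }),
});
const result = await res.json();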
@ -505,6 +532,126 @@ function apiWorkspaceEndpoints(app) {
}
}
);
app.post(
"/v1/workspace/:slug/stream-chat",
[validApiKey],
async (request, response) => {
/*
#swagger.tags = ['Workspaces']
#swagger.description = 'Execute a streamable chat with a workspace'
#swagger.requestBody = {
description: 'Send a prompt to the workspace and the type of conversation (query or chat).<br/><b>Query:</b> Will not use LLM unless there are relevant sources from vectorDB & does not recall chat history.<br/><b>Chat:</b> Uses LLM general knowledge w/custom embeddings to produce output, uses rolling chat history.',
required: true,
type: 'object',
content: {
"application/json": {
example: {
message: "What is AnythingLLM?",
mode: "query | chat"
}
}
}
}
#swagger.responses[200] = {
content: {
"text/event-stream": {
schema: {
type: 'array',
example: [
{
id: 'uuid-123',
type: "abort | textResponseChunk",
textResponse: "First chunk",
sources: [],
close: false,
error: "null | text string of the failure mode."
},
{
id: 'uuid-123',
type: "abort | textResponseChunk",
textResponse: "chunk two",
sources: [],
close: false,
error: "null | text string of the failure mode."
},
{
id: 'uuid-123',
type: "abort | textResponseChunk",
textResponse: "final chunk of LLM output!",
sources: [{title: "anythingllm.txt", chunk: "This is a context chunk used in the answer of the prompt by the LLM. This will only return in the final chunk."}],
close: true,
error: "null | text string of the failure mode."
}
]
}
}
}
}
#swagger.responses[403] = {
schema: {
"$ref": "#/definitions/InvalidAPIKey"
}
}
*/
try {
const { slug } = request.params;
const { message, mode = "query" } = reqBody(request);
const workspace = await Workspace.get({ slug });
if (!workspace) {
response.status(400).json({
id: uuidv4(),
type: "abort",
textResponse: null,
sources: [],
close: true,
error: `Workspace ${slug} is not a valid workspace.`,
});
return;
}
if (!message?.length || !VALID_CHAT_MODE.includes(mode)) {
response.status(400).json({
id: uuidv4(),
type: "abort",
textResponse: null,
sources: [],
close: true,
error: !message?.length
? "Message is empty"
: `${mode} is not a valid mode.`,
});
return;
}
response.setHeader("Cache-Control", "no-cache");
response.setHeader("Content-Type", "text/event-stream");
response.setHeader("Access-Control-Allow-Origin", "*");
response.setHeader("Connection", "keep-alive");
response.flushHeaders();
await streamChatWithWorkspace(response, workspace, message, mode);
await Telemetry.sendTelemetry("sent_chat", {
LLMSelection: process.env.LLM_PROVIDER || "openai",
Embedder: process.env.EMBEDDING_ENGINE || "inherit",
VectorDbSelection: process.env.VECTOR_DB || "pinecone",
});
response.end();
} catch (e) {
console.error(e);
writeResponseChunk(response, {
id: uuidv4(),
type: "abort",
textResponse: null,
sources: [],
close: true,
error: e.message,
});
response.end();
}
}
);
}
module.exports = { apiWorkspaceEndpoints };
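A hedged consumer sketch for the new stream-chat route. It assumes each chunk is framed as an SSE "data: <json>" event separated by blank lines; writeResponseChunk's exact framing is not shown in this diff, so the parsing would need adjusting if it differs.

async function streamWorkspaceChat(slug, message, apiKey, onChunk) {
  const res = await fetch(`/api/v1/workspace/${slug}/stream-chat`, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${apiKey}`, // placeholder auth scheme
      "Content-Type": "application/json",
    },
    body: JSON.stringify({ message, mode: "chat" }),
  });
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const events = buffer.split("\n\n");
    buffer = events.pop(); // keep any partial event for the next read
    for (const event of events) {
      const chunk = JSON.parse(event.replace(/^data:\s*/, ""));
      onChunk(chunk); // e.g. append chunk.textResponse to the UI
      if (chunk.close) return;
    }
  }
}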

View File

@ -1,7 +1,6 @@
const { v4: uuidv4 } = require("uuid");
const { reqBody, userFromSession, multiUserMode } = require("../utils/http");
const { Workspace } = require("../models/workspace");
const { chatWithWorkspace } = require("../utils/chats");
const { validatedRequest } = require("../utils/middleware/validatedRequest");
const { WorkspaceChats } = require("../models/workspaceChats");
const { SystemSettings } = require("../models/systemSettings");
@ -9,6 +8,7 @@ const { Telemetry } = require("../models/telemetry");
const {
streamChatWithWorkspace,
writeResponseChunk,
VALID_CHAT_MODE,
} = require("../utils/chats/stream");
function chatEndpoints(app) {
@ -32,6 +32,20 @@ function chatEndpoints(app) {
return;
}
if (!message?.length || !VALID_CHAT_MODE.includes(mode)) {
response.status(400).json({
id: uuidv4(),
type: "abort",
textResponse: null,
sources: [],
close: true,
error: !message?.length
? "Message is empty."
: `${mode} is not a valid mode.`,
});
return;
}
response.setHeader("Cache-Control", "no-cache");
response.setHeader("Content-Type", "text/event-stream");
response.setHeader("Access-Control-Allow-Origin", "*");
@ -95,85 +109,6 @@ function chatEndpoints(app) {
}
}
);
app.post(
"/workspace/:slug/chat",
[validatedRequest],
async (request, response) => {
try {
const user = await userFromSession(request, response);
const { slug } = request.params;
const { message, mode = "query" } = reqBody(request);
const workspace = multiUserMode(response)
? await Workspace.getWithUser(user, { slug })
: await Workspace.get({ slug });
if (!workspace) {
response.sendStatus(400).end();
return;
}
if (multiUserMode(response) && user.role !== "admin") {
const limitMessagesSetting = await SystemSettings.get({
label: "limit_user_messages",
});
const limitMessages = limitMessagesSetting?.value === "true";
if (limitMessages) {
const messageLimitSetting = await SystemSettings.get({
label: "message_limit",
});
const systemLimit = Number(messageLimitSetting?.value);
if (!!systemLimit) {
const currentChatCount = await WorkspaceChats.count({
user_id: user.id,
createdAt: {
gte: new Date(new Date() - 24 * 60 * 60 * 1000),
},
});
if (currentChatCount >= systemLimit) {
response.status(500).json({
id: uuidv4(),
type: "abort",
textResponse: null,
sources: [],
close: true,
error: `You have met your maximum 24 hour chat quota of ${systemLimit} chats set by the instance administrators. Try again later.`,
});
return;
}
}
}
}
const result = await chatWithWorkspace(workspace, message, mode, user);
await Telemetry.sendTelemetry(
"sent_chat",
{
multiUserMode: multiUserMode(response),
LLMSelection: process.env.LLM_PROVIDER || "openai",
Embedder: process.env.EMBEDDING_ENGINE || "inherit",
VectorDbSelection: process.env.VECTOR_DB || "pinecone",
},
user?.id
);
response.status(200).json({ ...result });
} catch (e) {
console.error(e);
response.status(500).json({
id: uuidv4(),
type: "abort",
textResponse: null,
sources: [],
close: true,
error: e.message,
});
}
}
);
}
module.exports = { chatEndpoints };

View File

@ -33,7 +33,7 @@ function inviteEndpoints(app) {
app.post("/invite/:code", async (request, response) => {
try {
const { code } = request.params;
const userParams = reqBody(request);
const { username, password } = reqBody(request);
const invite = await Invite.get({ code });
if (!invite || invite.status !== "pending") {
response
@ -42,7 +42,11 @@ function inviteEndpoints(app) {
return;
}
const { user, error } = await User.create(userParams);
const { user, error } = await User.create({
username,
password,
role: "default",
});
if (!user) {
console.error("Accepting invite:", error);
response

View File

@ -1,4 +1,5 @@
const path = require("path");
const fs = require("fs");
process.env.NODE_ENV === "development"
? require("dotenv").config({ path: `.env.${process.env.NODE_ENV}` })
: require("dotenv").config({
@ -7,7 +8,7 @@ process.env.NODE_ENV === "development"
: path.resolve(__dirname, ".env"),
});
const { viewLocalFiles } = require("../utils/files");
const { viewLocalFiles, normalizePath } = require("../utils/files");
const { exportData, unpackAndOverwriteImport } = require("../utils/files/data");
const {
checkProcessorAlive,
@ -21,6 +22,7 @@ const {
makeJWT,
userFromSession,
multiUserMode,
queryParams,
} = require("../utils/http");
const {
setupDataImports,
@ -34,7 +36,6 @@ const { validatedRequest } = require("../utils/middleware/validatedRequest");
const { handleImports } = setupDataImports();
const { handleLogoUploads } = setupLogoUploads();
const { handlePfpUploads } = setupPfpUploads();
const fs = require("fs");
const {
getDefaultFilename,
determineLogoFilepath,
@ -111,6 +112,8 @@ function systemEndpoints(app) {
app.post("/request-token", async (request, response) => {
try {
const bcrypt = require("bcrypt");
if (await SystemSettings.isMultiUserMode()) {
const { username, password } = reqBody(request);
const existingUser = await User.get({ username });
@ -125,7 +128,6 @@ function systemEndpoints(app) {
return;
}
const bcrypt = require("bcrypt");
if (!bcrypt.compareSync(password, existingUser.password)) {
response.status(200).json({
user: null,
@ -163,7 +165,12 @@ function systemEndpoints(app) {
return;
} else {
const { password } = reqBody(request);
if (password !== process.env.AUTH_TOKEN) {
// Hashing the configured token on the fly lets this reuse bcrypt's
// comparison rather than a raw string equality check.
if (
!bcrypt.compareSync(
password,
bcrypt.hashSync(process.env.AUTH_TOKEN, 10)
)
) {
response.status(401).json({
valid: false,
token: null,
@ -185,16 +192,23 @@ function systemEndpoints(app) {
}
});
app.get("/system/system-vectors", [validatedRequest], async (_, response) => {
app.get(
"/system/system-vectors",
[validatedRequest],
async (request, response) => {
try {
const query = queryParams(request);
const VectorDb = getVectorDbClass();
const vectorCount = await VectorDb.totalVectors();
const vectorCount = !!query.slug
? await VectorDb.namespaceCount(query.slug)
: await VectorDb.totalVectors();
response.status(200).json({ vectorCount });
} catch (e) {
console.log(e.message, e);
response.sendStatus(500).end();
}
});
}
);
app.delete(
"/system/remove-document",
@ -274,8 +288,14 @@ function systemEndpoints(app) {
[validatedRequest, flexUserRoleValid],
async (request, response) => {
try {
const user = await userFromSession(request, response);
if (!!user && user.role !== "admin") {
response.sendStatus(401).end();
return;
}
const body = reqBody(request);
const { newValues, error } = updateENV(body);
const { newValues, error } = await updateENV(body);
if (process.env.NODE_ENV === "production") await dumpENV();
response.status(200).json({ newValues, error });
} catch (e) {
@ -297,7 +317,7 @@ function systemEndpoints(app) {
}
const { usePassword, newPassword } = reqBody(request);
const { error } = updateENV(
const { error } = await updateENV(
{
AuthToken: usePassword ? newPassword : "",
JWTSecret: usePassword ? v4() : "",
@ -340,7 +360,7 @@ function systemEndpoints(app) {
message_limit: 25,
});
updateENV(
await updateENV(
{
AuthToken: "",
JWTSecret: process.env.JWT_SECRET || v4(),
@ -374,7 +394,10 @@ function systemEndpoints(app) {
}
});
app.get("/system/data-export", [validatedRequest], async (_, response) => {
app.get(
"/system/data-export",
[validatedRequest, flexUserRoleValid],
async (_, response) => {
try {
const { filename, error } = await exportData();
response.status(200).json({ filename, error });
@ -382,13 +405,12 @@ function systemEndpoints(app) {
console.log(e.message, e);
response.sendStatus(500).end();
}
});
}
);
app.get("/system/data-exports/:filename", (request, response) => {
const exportLocation = __dirname + "/../storage/exports/";
const sanitized = path
.normalize(request.params.filename)
.replace(/^(\.\.(\/|\\|$))+/, "");
const sanitized = normalizePath(request.params.filename);
const finalDestination = path.join(exportLocation, sanitized);
if (!fs.existsSync(finalDestination)) {
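normalizePath's implementation is not shown in this diff, but the inline sanitization it replaces (above) implies its shape. A sketch under that assumption only; the real helper lives in ../utils/files and may do more:

const path = require("path");
function normalizePath(filepath = "") {
  // Collapse the path, then strip any leading ../ (or ..\) segments so the
  // result cannot escape the directory it is later joined with.
  return path.normalize(filepath).replace(/^(\.\.(\/|\\|$))+/, "");
}
// e.g. normalizePath("../../etc/passwd") -> "etc/passwd", which stays inside
// exportLocation once passed through path.join.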
@ -489,7 +511,8 @@ function systemEndpoints(app) {
}
const userRecord = await User.get({ id: user.id });
const oldPfpFilename = userRecord.pfpFilename;
const oldPfpFilename = normalizePath(userRecord.pfpFilename);
console.log("oldPfpFilename", oldPfpFilename);
if (oldPfpFilename) {
const oldPfpPath = path.join(
@ -523,7 +546,7 @@ function systemEndpoints(app) {
try {
const user = await userFromSession(request, response);
const userRecord = await User.get({ id: user.id });
const oldPfpFilename = userRecord.pfpFilename;
const oldPfpFilename = normalizePath(userRecord.pfpFilename);
console.log("oldPfpFilename", oldPfpFilename);
if (oldPfpFilename) {
const oldPfpPath = path.join(

View File

@ -60,6 +60,19 @@ const SystemSettings = {
QdrantApiKey: process.env.QDRANT_API_KEY,
}
: {}),
...(vectorDB === "milvus"
? {
MilvusAddress: process.env.MILVUS_ADDRESS,
MilvusUsername: process.env.MILVUS_USERNAME,
MilvusPassword: !!process.env.MILVUS_PASSWORD,
}
: {}),
...(vectorDB === "zilliz"
? {
ZillizEndpoint: process.env.ZILLIZ_ENDPOINT,
ZillizApiToken: process.env.ZILLIZ_API_TOKEN,
}
: {}),
LLMProvider: llmProvider,
...(llmProvider === "openai"
? {
@ -144,9 +157,40 @@ const SystemSettings = {
AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
}
: {}),
...(llmProvider === "togetherai"
? {
TogetherAiApiKey: !!process.env.TOGETHER_AI_API_KEY,
TogetherAiModelPref: process.env.TOGETHER_AI_MODEL_PREF,
// For embedding credentials when TogetherAI is selected.
OpenAiKey: !!process.env.OPEN_AI_KEY,
AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
}
: {}),
...(llmProvider === "mistral"
? {
MistralApiKey: !!process.env.MISTRAL_API_KEY,
MistralModelPref: process.env.MISTRAL_MODEL_PREF,
// For embedding credentials when mistral is selected.
OpenAiKey: !!process.env.OPEN_AI_KEY,
AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
}
: {}),
...(llmProvider === "native"
? {
NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF,
NativeLLMTokenLimit: process.env.NATIVE_LLM_MODEL_TOKEN_LIMIT,
// For embedding credentials when the native LLM is selected.
OpenAiKey: !!process.env.OPEN_AI_KEY,
AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
}
: {}),
};

View File

@ -31,7 +31,10 @@ const WelcomeMessages = {
await prisma.welcome_messages.deleteMany({}); // Delete all existing messages
// Create new messages
// We create each message individually because prisma
// with sqlite does not support createMany()
for (const [index, message] of messages.entries()) {
if (!message.response) continue;
await prisma.welcome_messages.create({
data: {
user: message.user,

View File

@ -14,6 +14,7 @@ const Workspace = {
"lastUpdatedAt",
"openAiPrompt",
"similarityThreshold",
"chatModel",
],
new: async function (name = null, creatorId = null) {
@ -191,6 +192,20 @@ const Workspace = {
return { success: false, error: error.message };
}
},
resetWorkspaceChatModels: async () => {
try {
await prisma.workspaces.updateMany({
data: {
chatModel: null,
},
});
return { success: true, error: null };
} catch (error) {
console.error("Error resetting workspace chat models:", error.message);
return { success: false, error: error.message };
}
},
};
module.exports = { Workspace };

View File

@ -27,7 +27,8 @@
"@pinecone-database/pinecone": "^0.1.6",
"@prisma/client": "5.3.0",
"@qdrant/js-client-rest": "^1.4.0",
"@xenova/transformers": "^2.10.0",
"@xenova/transformers": "^2.14.0",
"@zilliz/milvus2-sdk-node": "^2.3.5",
"archiver": "^5.3.1",
"bcrypt": "^5.1.0",
"body-parser": "^1.20.2",

View File

@ -0,0 +1,2 @@
-- AlterTable
ALTER TABLE "workspaces" ADD COLUMN "chatModel" TEXT;

View File

@ -93,6 +93,7 @@ model workspaces {
lastUpdatedAt DateTime @default(now())
openAiPrompt String?
similarityThreshold Float? @default(0.25)
chatModel String?
workspace_users workspace_users[]
documents workspace_documents[]
}

View File

@ -1,4 +1,6 @@
const swaggerAutogen = require('swagger-autogen')({ openapi: '3.0.0' });
const fs = require('fs')
const path = require('path')
const doc = {
info: {
@ -6,6 +8,8 @@ const doc = {
title: 'AnythingLLM Developer API',
description: 'API endpoints that enable programmatic reading, writing, and updating of your AnythingLLM instance. UI supplied by Swagger.io.',
},
// Swagger-autogen does not allow us to use relative paths as these will resolve to
// http:///api in the openapi.json file, so we need to monkey-patch this post-generation.
host: '/api',
schemes: ['http'],
securityDefinitions: {
@ -25,7 +29,7 @@ const doc = {
}
};
const outputFile = './openapi.json';
const outputFile = path.resolve(__dirname, './openapi.json');
const endpointsFiles = [
'../endpoints/api/auth/index.js',
'../endpoints/api/admin/index.js',
@ -35,3 +39,13 @@ const endpointsFiles = [
];
swaggerAutogen(outputFile, endpointsFiles, doc)
.then(({ data }) => {
const openApiSpec = {
...data,
servers: [{
url: "/api"
}]
}
fs.writeFileSync(outputFile, JSON.stringify(openApiSpec, null, 2), { encoding: 'utf-8', flag: 'w' });
console.log(`Swagger-autogen: \x1b[32mPatched servers.url ✔\x1b[0m`)
})

View File

@ -7,7 +7,7 @@
},
"servers": [
{
"url": "http:///api/"
"url": "/api"
}
],
"paths": {
@ -845,7 +845,22 @@
"type": "object",
"example": {
"success": true,
"error": null
"error": null,
"documents": [
{
"location": "custom-documents/anythingllm.txt-6e8be64c-c162-4b43-9997-b068c0071e8b.json",
"name": "anythingllm.txt-6e8be64c-c162-4b43-9997-b068c0071e8b.json",
"url": "file://Users/tim/Documents/anything-llm/collector/hotdir/anythingllm.txt",
"title": "anythingllm.txt",
"docAuthor": "Unknown",
"description": "Unknown",
"docSource": "a text file uploaded by the user.",
"chunkSource": "anythingllm.txt",
"published": "1/16/2024, 3:07:00PM",
"wordCount": 93,
"token_count_estimate": 115
}
]
}
}
}
@ -890,6 +905,88 @@
}
}
},
"/v1/document/upload-link": {
"post": {
"tags": [
"Documents"
],
"description": "Upload a valid URL for AnythingLLM to scrape and prepare for embedding.",
"parameters": [
{
"name": "Authorization",
"in": "header",
"schema": {
"type": "string"
}
}
],
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"type": "object",
"example": {
"success": true,
"error": null,
"documents": [
{
"id": "c530dbe6-bff1-4b9e-b87f-710d539d20bc",
"url": "file://useanything_com.html",
"title": "useanything_com.html",
"docAuthor": "no author found",
"description": "No description found.",
"docSource": "URL link uploaded by the user.",
"chunkSource": "https:useanything.com.html",
"published": "1/16/2024, 3:46:33PM",
"wordCount": 252,
"pageContent": "AnythingLLM is the best....",
"token_count_estimate": 447,
"location": "custom-documents/url-useanything_com-c530dbe6-bff1-4b9e-b87f-710d539d20bc.json"
}
]
}
}
}
}
},
"403": {
"description": "Forbidden",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/InvalidAPIKey"
}
},
"application/xml": {
"schema": {
"$ref": "#/components/schemas/InvalidAPIKey"
}
}
}
},
"500": {
"description": "Internal Server Error"
}
},
"requestBody": {
"description": "Link of web address to be scraped.",
"required": true,
"type": "file",
"content": {
"application/json": {
"schema": {
"type": "object",
"example": {
"link": "https://useanything.com"
}
}
}
}
}
}
},
"/v1/documents": {
"get": {
"tags": [
@ -953,6 +1050,81 @@
}
}
},
"/v1/document/{docName}": {
"get": {
"tags": [
"Documents"
],
"description": "Get a single document by its unique AnythingLLM document name",
"parameters": [
{
"name": "docName",
"in": "path",
"required": true,
"schema": {
"type": "string"
},
"description": "Unique document name to find (name in /documents)"
},
{
"name": "Authorization",
"in": "header",
"schema": {
"type": "string"
}
}
],
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"type": "object",
"example": {
"localFiles": {
"name": "documents",
"type": "folder",
"items": [
{
"name": "my-stored-document.txt-uuid1234.json",
"type": "file",
"id": "bb07c334-4dab-4419-9462-9d00065a49a1",
"url": "file://my-stored-document.txt",
"title": "my-stored-document.txt",
"cached": false
}
]
}
}
}
}
}
},
"403": {
"description": "Forbidden",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/InvalidAPIKey"
}
},
"application/xml": {
"schema": {
"$ref": "#/components/schemas/InvalidAPIKey"
}
}
}
},
"404": {
"description": "Not Found"
},
"500": {
"description": "Internal Server Error"
}
}
}
},
"/v1/document/accepted-file-types": {
"get": {
"tags": [
@ -1518,9 +1690,11 @@
"content": {
"application/json": {
"example": {
"adds": [],
"adds": [
"custom-documents/my-pdf.pdf-hash.json"
],
"deletes": [
"custom-documents/anythingllm-hash.json"
"custom-documents/anythingllm.txt-hash.json"
]
}
}
@ -1598,7 +1772,106 @@
}
},
"requestBody": {
"description": "prompt to send to the workspace and the type of conversation (query or chat).",
"description": "Send a prompt to the workspace and the type of conversation (query or chat).<br/><b>Query:</b> Will not use LLM unless there are relevant sources from vectorDB & does not recall chat history.<br/><b>Chat:</b> Uses LLM general knowledge w/custom embeddings to produce output, uses rolling chat history.",
"required": true,
"type": "object",
"content": {
"application/json": {
"example": {
"message": "What is AnythingLLM?",
"mode": "query | chat"
}
}
}
}
}
},
"/v1/workspace/{slug}/stream-chat": {
"post": {
"tags": [
"Workspaces"
],
"description": "Execute a streamable chat with a workspace",
"parameters": [
{
"name": "slug",
"in": "path",
"required": true,
"schema": {
"type": "string"
}
},
{
"name": "Authorization",
"in": "header",
"schema": {
"type": "string"
}
}
],
"responses": {
"200": {
"content": {
"text/event-stream": {
"schema": {
"type": "array",
"example": [
{
"id": "uuid-123",
"type": "abort | textResponseChunk",
"textResponse": "First chunk",
"sources": [],
"close": false,
"error": "null | text string of the failure mode."
},
{
"id": "uuid-123",
"type": "abort | textResponseChunk",
"textResponse": "chunk two",
"sources": [],
"close": false,
"error": "null | text string of the failure mode."
},
{
"id": "uuid-123",
"type": "abort | textResponseChunk",
"textResponse": "final chunk of LLM output!",
"sources": [
{
"title": "anythingllm.txt",
"chunk": "This is a context chunk used in the answer of the prompt by the LLM. This will only return in the final chunk."
}
],
"close": true,
"error": "null | text string of the failure mode."
}
]
}
}
},
"description": "OK"
},
"400": {
"description": "Bad Request"
},
"403": {
"description": "Forbidden",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/InvalidAPIKey"
}
},
"application/xml": {
"schema": {
"$ref": "#/components/schemas/InvalidAPIKey"
}
}
}
}
},
"requestBody": {
"description": "Send a prompt to the workspace and the type of conversation (query or chat).<br/><b>Query:</b> Will not use LLM unless there are relevant sources from vectorDB & does not recall chat history.<br/><b>Chat:</b> Uses LLM general knowledge w/custom embeddings to produce output, uses rolling chat history.",
"required": true,
"type": "object",
"content": {

View File

@ -2,7 +2,7 @@ const { v4 } = require("uuid");
const { chatPrompt } = require("../../chats");
class AnthropicLLM {
constructor(embedder = null) {
constructor(embedder = null, modelPreference = null) {
if (!process.env.ANTHROPIC_API_KEY)
throw new Error("No Anthropic API key was set.");
@ -12,7 +12,8 @@ class AnthropicLLM {
apiKey: process.env.ANTHROPIC_API_KEY,
});
this.anthropic = anthropic;
this.model = process.env.ANTHROPIC_MODEL_PREF || "claude-2";
this.model =
modelPreference || process.env.ANTHROPIC_MODEL_PREF || "claude-2";
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
@ -25,6 +26,7 @@ class AnthropicLLM {
);
this.embedder = embedder;
this.answerKey = v4().split("-")[0];
this.defaultTemp = 0.7;
}
streamingEnabled() {

View File

@ -2,7 +2,7 @@ const { AzureOpenAiEmbedder } = require("../../EmbeddingEngines/azureOpenAi");
const { chatPrompt } = require("../../chats");
class AzureOpenAiLLM {
constructor(embedder = null) {
constructor(embedder = null, _modelPreference = null) {
const { OpenAIClient, AzureKeyCredential } = require("@azure/openai");
if (!process.env.AZURE_OPENAI_ENDPOINT)
throw new Error("No Azure API endpoint was set.");
@ -25,6 +25,7 @@ class AzureOpenAiLLM {
"No embedding provider defined for AzureOpenAiLLM - falling back to AzureOpenAiEmbedder for embedding!"
);
this.embedder = !embedder ? new AzureOpenAiEmbedder() : embedder;
this.defaultTemp = 0.7;
}
#appendContext(contextTexts = []) {
@ -93,7 +94,7 @@ class AzureOpenAiLLM {
);
const textResponse = await this.openai
.getChatCompletions(this.model, messages, {
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
})
.then((res) => {
@@ -130,7 +131,7 @@
this.model,
messages,
{
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
}
);

View File

@@ -1,14 +1,15 @@
const { chatPrompt } = require("../../chats");
class GeminiLLM {
constructor(embedder = null) {
constructor(embedder = null, modelPreference = null) {
if (!process.env.GEMINI_API_KEY)
throw new Error("No Gemini API key was set.");
// Docs: https://ai.google.dev/tutorials/node_quickstart
const { GoogleGenerativeAI } = require("@google/generative-ai");
const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
this.model = process.env.GEMINI_LLM_MODEL_PREF || "gemini-pro";
this.model =
modelPreference || process.env.GEMINI_LLM_MODEL_PREF || "gemini-pro";
this.gemini = genAI.getGenerativeModel({ model: this.model });
this.limits = {
history: this.promptWindowLimit() * 0.15,
@@ -21,6 +22,7 @@ class GeminiLLM {
"INVALID GEMINI LLM SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Gemini as your LLM."
);
this.embedder = embedder;
this.defaultTemp = 0.7; // not used for Gemini
}
#appendContext(contextTexts = []) {

View File

@@ -2,7 +2,7 @@ const { chatPrompt } = require("../../chats");
// hybrid of openAi LLM chat completion for LMStudio
class LMStudioLLM {
constructor(embedder = null) {
constructor(embedder = null, _modelPreference = null) {
if (!process.env.LMSTUDIO_BASE_PATH)
throw new Error("No LMStudio API Base Path was set.");
@@ -12,7 +12,7 @@ class LMStudioLLM {
});
this.lmstudio = new OpenAIApi(config);
// When using LMStudio's inference server - the model param is not required so
// we can stub it here.
// we can stub it here. LMStudio can only run one model at a time.
this.model = "model-placeholder";
this.limits = {
history: this.promptWindowLimit() * 0.15,
@@ -25,6 +25,7 @@ class LMStudioLLM {
"INVALID LM STUDIO SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LMStudio as your LLM."
);
this.embedder = embedder;
this.defaultTemp = 0.7;
}
#appendContext(contextTexts = []) {
@@ -85,7 +86,7 @@ class LMStudioLLM {
const textResponse = await this.lmstudio
.createChatCompletion({
model: this.model,
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
messages: await this.compressMessages(
{
@@ -122,7 +123,7 @@ class LMStudioLLM {
const streamRequest = await this.lmstudio.createChatCompletion(
{
model: this.model,
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
stream: true,
messages: await this.compressMessages(

View File

@@ -1,7 +1,7 @@
const { chatPrompt } = require("../../chats");
class LocalAiLLM {
constructor(embedder = null) {
constructor(embedder = null, modelPreference = null) {
if (!process.env.LOCAL_AI_BASE_PATH)
throw new Error("No LocalAI Base Path was set.");
@@ -15,7 +15,7 @@ class LocalAiLLM {
: {}),
});
this.openai = new OpenAIApi(config);
this.model = process.env.LOCAL_AI_MODEL_PREF;
this.model = modelPreference || process.env.LOCAL_AI_MODEL_PREF;
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
@@ -27,6 +27,7 @@ class LocalAiLLM {
"INVALID LOCAL AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LocalAI as your LLM."
);
this.embedder = embedder;
this.defaultTemp = 0.7;
}
#appendContext(contextTexts = []) {
@@ -85,7 +86,7 @@ class LocalAiLLM {
const textResponse = await this.openai
.createChatCompletion({
model: this.model,
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
messages: await this.compressMessages(
{
@@ -123,7 +124,7 @@
{
model: this.model,
stream: true,
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
messages: await this.compressMessages(
{

View File

@@ -0,0 +1,184 @@
const { chatPrompt } = require("../../chats");
class MistralLLM {
constructor(embedder = null, modelPreference = null) {
const { Configuration, OpenAIApi } = require("openai");
if (!process.env.MISTRAL_API_KEY)
throw new Error("No Mistral API key was set.");
const config = new Configuration({
basePath: "https://api.mistral.ai/v1",
apiKey: process.env.MISTRAL_API_KEY,
});
this.openai = new OpenAIApi(config);
this.model =
modelPreference || process.env.MISTRAL_MODEL_PREF || "mistral-tiny";
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
if (!embedder)
console.warn(
"No embedding provider defined for MistralLLM - falling back to OpenAiEmbedder for embedding!"
);
this.embedder = embedder;
this.defaultTemp = 0.0;
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
streamingEnabled() {
return "streamChat" in this && "streamGetChatCompletion" in this;
}
promptWindowLimit() {
return 32000;
}
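// No server-side model list is consulted here; any configured Mistral model is assumed valid.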
async isValidChatCompletionModel(modelName = "") {
return true;
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
}
async isSafe(_ = "") {
return { safe: true, reasons: [] };
}
async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`Mistral chat: ${this.model} is not valid for chat completion!`
);
const textResponse = await this.openai
.createChatCompletion({
model: this.model,
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
messages: await this.compressMessages(
{
systemPrompt: chatPrompt(workspace),
userPrompt: prompt,
chatHistory,
},
rawHistory
),
})
.then((json) => {
const res = json.data;
if (!res.hasOwnProperty("choices"))
throw new Error("Mistral chat: No results!");
if (res.choices.length === 0)
throw new Error("Mistral chat: No results length!");
return res.choices[0].message.content;
})
.catch((error) => {
throw new Error(
`Mistral::createChatCompletion failed with: ${error.message}`
);
});
return textResponse;
}
async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`Mistral chat: ${this.model} is not valid for chat completion!`
);
const streamRequest = await this.openai.createChatCompletion(
{
model: this.model,
stream: true,
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
messages: await this.compressMessages(
{
systemPrompt: chatPrompt(workspace),
userPrompt: prompt,
chatHistory,
},
rawHistory
),
},
{ responseType: "stream" }
);
return streamRequest;
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`Mistral chat: ${this.model} is not valid for chat completion!`
);
const { data } = await this.openai.createChatCompletion({
model: this.model,
messages,
temperature,
});
if (!data.hasOwnProperty("choices")) return null;
return data.choices[0].message.content;
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`Mistral chat: ${this.model} is not valid for chat completion!`
);
const streamRequest = await this.openai.createChatCompletion(
{
model: this.model,
stream: true,
messages,
temperature,
},
{ responseType: "stream" }
);
return streamRequest;
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
module.exports = {
MistralLLM,
};

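A minimal usage sketch for the new class, assuming a configured embedder instance and MISTRAL_API_KEY (the require path and model name are illustrative, not from this diff):

(async () => {
  const { MistralLLM } = require("./MistralLLM"); // hypothetical path
  const llm = new MistralLLM(embedder, "mistral-medium"); // embedder: any configured embedding engine
  const reply = await llm.getChatCompletion(
    [{ role: "user", content: "Hello!" }],
    { temperature: llm.defaultTemp } // 0.0 for Mistral, per the constructor above
  );
  console.log(reply);
})();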
View File

@@ -10,11 +10,11 @@ const ChatLlamaCpp = (...args) =>
);
class NativeLLM {
constructor(embedder = null) {
constructor(embedder = null, modelPreference = null) {
if (!process.env.NATIVE_LLM_MODEL_PREF)
throw new Error("No local Llama model was set.");
this.model = process.env.NATIVE_LLM_MODEL_PREF || null;
this.model = modelPreference || process.env.NATIVE_LLM_MODEL_PREF || null;
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
@@ -29,6 +29,7 @@ class NativeLLM {
// Make directory when it does not exist in existing installations
if (!fs.existsSync(this.cacheDir)) fs.mkdirSync(this.cacheDir);
this.defaultTemp = 0.7;
}
async #initializeLlamaModel(temperature = 0.7) {
@@ -93,8 +94,6 @@
}
// Ensure the user set a value for the token limit
// and if undefined - assume 4096 window.
// DEV: Currently this ENV is not configurable.
promptWindowLimit() {
const limit = process.env.NATIVE_LLM_MODEL_TOKEN_LIMIT || 4096;
if (!limit || isNaN(Number(limit)))
@@ -132,7 +131,7 @@
);
const model = await this.#llamaClient({
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
});
const response = await model.call(messages);
return response.content;
@@ -145,7 +144,7 @@
async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
const model = await this.#llamaClient({
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
});
const messages = await this.compressMessages(
{

View File

@@ -3,12 +3,12 @@ const { StringOutputParser } = require("langchain/schema/output_parser");
// Docs: https://github.com/jmorganca/ollama/blob/main/docs/api.md
class OllamaAILLM {
constructor(embedder = null) {
constructor(embedder = null, modelPreference = null) {
if (!process.env.OLLAMA_BASE_PATH)
throw new Error("No Ollama Base Path was set.");
this.basePath = process.env.OLLAMA_BASE_PATH;
this.model = process.env.OLLAMA_MODEL_PREF;
this.model = modelPreference || process.env.OLLAMA_MODEL_PREF;
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
@@ -20,6 +20,7 @@ class OllamaAILLM {
"INVALID OLLAMA SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Ollama as your LLM."
);
this.embedder = embedder;
this.defaultTemp = 0.7;
}
#ollamaClient({ temperature = 0.7 }) {
@@ -113,7 +114,7 @@ class OllamaAILLM {
);
const model = this.#ollamaClient({
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
});
const textResponse = await model
.pipe(new StringOutputParser())
@@ -136,7 +137,7 @@
);
const model = this.#ollamaClient({
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
});
const stream = await model
.pipe(new StringOutputParser())

View File

@@ -2,7 +2,7 @@ const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
const { chatPrompt } = require("../../chats");
class OpenAiLLM {
constructor(embedder = null) {
constructor(embedder = null, modelPreference = null) {
const { Configuration, OpenAIApi } = require("openai");
if (!process.env.OPEN_AI_KEY) throw new Error("No OpenAI API key was set.");
@@ -10,7 +10,8 @@ class OpenAiLLM {
apiKey: process.env.OPEN_AI_KEY,
});
this.openai = new OpenAIApi(config);
this.model = process.env.OPEN_MODEL_PREF || "gpt-3.5-turbo";
this.model =
modelPreference || process.env.OPEN_MODEL_PREF || "gpt-3.5-turbo";
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
@@ -22,6 +23,7 @@ class OpenAiLLM {
"No embedding provider defined for OpenAiLLM - falling back to OpenAiEmbedder for embedding!"
);
this.embedder = !embedder ? new OpenAiEmbedder() : embedder;
this.defaultTemp = 0.7;
}
#appendContext(contextTexts = []) {
@@ -126,7 +128,7 @@ class OpenAiLLM {
const textResponse = await this.openai
.createChatCompletion({
model: this.model,
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
messages: await this.compressMessages(
{
@@ -164,7 +166,7 @@
{
model: this.model,
stream: true,
temperature: Number(workspace?.openAiTemp ?? 0.7),
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
messages: await this.compressMessages(
{

View File

@@ -0,0 +1,199 @@
const { chatPrompt } = require("../../chats");
function togetherAiModels() {
const { MODELS } = require("./models.js");
return MODELS || {};
}
class TogetherAiLLM {
constructor(embedder = null, modelPreference = null) {
const { Configuration, OpenAIApi } = require("openai");
if (!process.env.TOGETHER_AI_API_KEY)
throw new Error("No TogetherAI API key was set.");
const config = new Configuration({
basePath: "https://api.together.xyz/v1",
apiKey: process.env.TOGETHER_AI_API_KEY,
});
this.openai = new OpenAIApi(config);
this.model = modelPreference || process.env.TOGETHER_AI_MODEL_PREF;
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
if (!embedder)
throw new Error(
"INVALID TOGETHER AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Together AI as your LLM."
);
this.embedder = embedder;
this.defaultTemp = 0.7;
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
allModelInformation() {
return togetherAiModels();
}
streamingEnabled() {
return "streamChat" in this && "streamGetChatCompletion" in this;
}
// Look up the context window for the selected model and
// fall back to a 4096 token window when the model is unknown.
promptWindowLimit() {
const availableModels = this.allModelInformation();
return availableModels[this.model]?.maxLength || 4096;
}
async isValidChatCompletionModel(model = "") {
const availableModels = this.allModelInformation();
return availableModels.hasOwnProperty(model);
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
}
async isSafe(_input = "") {
// Not implemented so must be stubbed
return { safe: true, reasons: [] };
}
async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`Together AI chat: ${this.model} is not valid for chat completion!`
);
const textResponse = await this.openai
.createChatCompletion({
model: this.model,
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
messages: await this.compressMessages(
{
systemPrompt: chatPrompt(workspace),
userPrompt: prompt,
chatHistory,
},
rawHistory
),
})
.then((json) => {
const res = json.data;
if (!res.hasOwnProperty("choices"))
throw new Error("Together AI chat: No results!");
if (res.choices.length === 0)
throw new Error("Together AI chat: No results length!");
return res.choices[0].message.content;
})
.catch((error) => {
throw new Error(
`TogetherAI::createChatCompletion failed with: ${error.message}`
);
});
return textResponse;
}
async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`TogetherAI chat: ${this.model} is not valid for chat completion!`
);
const streamRequest = await this.openai.createChatCompletion(
{
model: this.model,
stream: true,
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
messages: await this.compressMessages(
{
systemPrompt: chatPrompt(workspace),
userPrompt: prompt,
chatHistory,
},
rawHistory
),
},
{ responseType: "stream" }
);
return { type: "togetherAiStream", stream: streamRequest };
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`TogetherAI chat: ${this.model} is not valid for chat completion!`
);
const { data } = await this.openai.createChatCompletion({
model: this.model,
messages,
temperature,
});
if (!data.hasOwnProperty("choices")) return null;
return data.choices[0].message.content;
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`TogetherAI chat: ${this.model} is not valid for chat completion!`
);
const streamRequest = await this.openai.createChatCompletion(
{
model: this.model,
stream: true,
messages,
temperature,
},
{ responseType: "stream" }
);
return { type: "togetherAiStream", stream: streamRequest };
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
module.exports = {
TogetherAiLLM,
togetherAiModels,
};

View File

@@ -0,0 +1,226 @@
const MODELS = {
"togethercomputer/alpaca-7b": {
id: "togethercomputer/alpaca-7b",
organization: "Stanford",
name: "Alpaca (7B)",
maxLength: 2048,
},
"Austism/chronos-hermes-13b": {
id: "Austism/chronos-hermes-13b",
organization: "Austism",
name: "Chronos Hermes (13B)",
maxLength: 2048,
},
"togethercomputer/CodeLlama-13b-Instruct": {
id: "togethercomputer/CodeLlama-13b-Instruct",
organization: "Meta",
name: "Code Llama Instruct (13B)",
maxLength: 8192,
},
"togethercomputer/CodeLlama-34b-Instruct": {
id: "togethercomputer/CodeLlama-34b-Instruct",
organization: "Meta",
name: "Code Llama Instruct (34B)",
maxLength: 8192,
},
"togethercomputer/CodeLlama-7b-Instruct": {
id: "togethercomputer/CodeLlama-7b-Instruct",
organization: "Meta",
name: "Code Llama Instruct (7B)",
maxLength: 8192,
},
"DiscoResearch/DiscoLM-mixtral-8x7b-v2": {
id: "DiscoResearch/DiscoLM-mixtral-8x7b-v2",
organization: "DiscoResearch",
name: "DiscoLM Mixtral 8x7b",
maxLength: 32768,
},
"togethercomputer/falcon-40b-instruct": {
id: "togethercomputer/falcon-40b-instruct",
organization: "TII UAE",
name: "Falcon Instruct (40B)",
maxLength: 2048,
},
"togethercomputer/falcon-7b-instruct": {
id: "togethercomputer/falcon-7b-instruct",
organization: "TII UAE",
name: "Falcon Instruct (7B)",
maxLength: 2048,
},
"togethercomputer/GPT-NeoXT-Chat-Base-20B": {
id: "togethercomputer/GPT-NeoXT-Chat-Base-20B",
organization: "Together",
name: "GPT-NeoXT-Chat-Base (20B)",
maxLength: 2048,
},
"togethercomputer/llama-2-13b-chat": {
id: "togethercomputer/llama-2-13b-chat",
organization: "Meta",
name: "LLaMA-2 Chat (13B)",
maxLength: 4096,
},
"togethercomputer/llama-2-70b-chat": {
id: "togethercomputer/llama-2-70b-chat",
organization: "Meta",
name: "LLaMA-2 Chat (70B)",
maxLength: 4096,
},
"togethercomputer/llama-2-7b-chat": {
id: "togethercomputer/llama-2-7b-chat",
organization: "Meta",
name: "LLaMA-2 Chat (7B)",
maxLength: 4096,
},
"togethercomputer/Llama-2-7B-32K-Instruct": {
id: "togethercomputer/Llama-2-7B-32K-Instruct",
organization: "Together",
name: "LLaMA-2-7B-32K-Instruct (7B)",
maxLength: 32768,
},
"mistralai/Mistral-7B-Instruct-v0.1": {
id: "mistralai/Mistral-7B-Instruct-v0.1",
organization: "MistralAI",
name: "Mistral (7B) Instruct v0.1",
maxLength: 4096,
},
"mistralai/Mistral-7B-Instruct-v0.2": {
id: "mistralai/Mistral-7B-Instruct-v0.2",
organization: "MistralAI",
name: "Mistral (7B) Instruct v0.2",
maxLength: 32768,
},
"mistralai/Mixtral-8x7B-Instruct-v0.1": {
id: "mistralai/Mixtral-8x7B-Instruct-v0.1",
organization: "MistralAI",
name: "Mixtral-8x7B Instruct",
maxLength: 32768,
},
"Gryphe/MythoMax-L2-13b": {
id: "Gryphe/MythoMax-L2-13b",
organization: "Gryphe",
name: "MythoMax-L2 (13B)",
maxLength: 4096,
},
"NousResearch/Nous-Hermes-llama-2-7b": {
id: "NousResearch/Nous-Hermes-llama-2-7b",
organization: "NousResearch",
name: "Nous Hermes LLaMA-2 (7B)",
maxLength: 4096,
},
"NousResearch/Nous-Hermes-Llama2-13b": {
id: "NousResearch/Nous-Hermes-Llama2-13b",
organization: "NousResearch",
name: "Nous Hermes Llama-2 (13B)",
maxLength: 4096,
},
"NousResearch/Nous-Hermes-Llama2-70b": {
id: "NousResearch/Nous-Hermes-Llama2-70b",
organization: "NousResearch",
name: "Nous Hermes Llama-2 (70B)",
maxLength: 4096,
},
"NousResearch/Nous-Hermes-2-Yi-34B": {
id: "NousResearch/Nous-Hermes-2-Yi-34B",
organization: "NousResearch",
name: "Nous Hermes-2 Yi (34B)",
maxLength: 4096,
},
"NousResearch/Nous-Capybara-7B-V1p9": {
id: "NousResearch/Nous-Capybara-7B-V1p9",
organization: "NousResearch",
name: "Nous Capybara v1.9 (7B)",
maxLength: 8192,
},
"openchat/openchat-3.5-1210": {
id: "openchat/openchat-3.5-1210",
organization: "OpenChat",
name: "OpenChat 3.5 1210 (7B)",
maxLength: 8192,
},
"teknium/OpenHermes-2-Mistral-7B": {
id: "teknium/OpenHermes-2-Mistral-7B",
organization: "teknium",
name: "OpenHermes-2-Mistral (7B)",
maxLength: 4096,
},
"teknium/OpenHermes-2p5-Mistral-7B": {
id: "teknium/OpenHermes-2p5-Mistral-7B",
organization: "teknium",
name: "OpenHermes-2.5-Mistral (7B)",
maxLength: 4096,
},
"Open-Orca/Mistral-7B-OpenOrca": {
id: "Open-Orca/Mistral-7B-OpenOrca",
organization: "OpenOrca",
name: "OpenOrca Mistral (7B) 8K",
maxLength: 8192,
},
"garage-bAInd/Platypus2-70B-instruct": {
id: "garage-bAInd/Platypus2-70B-instruct",
organization: "garage-bAInd",
name: "Platypus2 Instruct (70B)",
maxLength: 4096,
},
"togethercomputer/Pythia-Chat-Base-7B-v0.16": {
id: "togethercomputer/Pythia-Chat-Base-7B-v0.16",
organization: "Together",
name: "Pythia-Chat-Base (7B)",
maxLength: 2048,
},
"togethercomputer/Qwen-7B-Chat": {
id: "togethercomputer/Qwen-7B-Chat",
organization: "Qwen",
name: "Qwen-Chat (7B)",
maxLength: 8192,
},
"togethercomputer/RedPajama-INCITE-Chat-3B-v1": {
id: "togethercomputer/RedPajama-INCITE-Chat-3B-v1",
organization: "Together",
name: "RedPajama-INCITE Chat (3B)",
maxLength: 2048,
},
"togethercomputer/RedPajama-INCITE-7B-Chat": {
id: "togethercomputer/RedPajama-INCITE-7B-Chat",
organization: "Together",
name: "RedPajama-INCITE Chat (7B)",
maxLength: 2048,
},
"upstage/SOLAR-0-70b-16bit": {
id: "upstage/SOLAR-0-70b-16bit",
organization: "Upstage",
name: "SOLAR v0 (70B)",
maxLength: 4096,
},
"togethercomputer/StripedHyena-Nous-7B": {
id: "togethercomputer/StripedHyena-Nous-7B",
organization: "Together",
name: "StripedHyena Nous (7B)",
maxLength: 32768,
},
"lmsys/vicuna-7b-v1.5": {
id: "lmsys/vicuna-7b-v1.5",
organization: "LM Sys",
name: "Vicuna v1.5 (7B)",
maxLength: 4096,
},
"lmsys/vicuna-13b-v1.5": {
id: "lmsys/vicuna-13b-v1.5",
organization: "LM Sys",
name: "Vicuna v1.5 (13B)",
maxLength: 4096,
},
"lmsys/vicuna-13b-v1.5-16k": {
id: "lmsys/vicuna-13b-v1.5-16k",
organization: "LM Sys",
name: "Vicuna v1.5 16K (13B)",
maxLength: 16384,
},
"zero-one-ai/Yi-34B-Chat": {
id: "zero-one-ai/Yi-34B-Chat",
organization: "01.AI",
name: "01-ai Yi Chat (34B)",
maxLength: 4096,
},
};
module.exports.MODELS = MODELS;

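The map is keyed by the exact model string the TogetherAI API expects, so consumers like promptWindowLimit() above can do a direct lookup:

const { MODELS } = require("./models.js");
MODELS["mistralai/Mixtral-8x7B-Instruct-v0.1"].maxLength; // 32768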
View File

@@ -0,0 +1 @@
*.json

View File

@@ -0,0 +1,39 @@
| Organization | Model Name | Model String for API | Max Seq Length |
| ------------- | ---------------------------- | -------------------------------------------- | -------------- |
| Stanford | Alpaca (7B) | togethercomputer/alpaca-7b | 2048 |
| Austism | Chronos Hermes (13B) | Austism/chronos-hermes-13b | 2048 |
| Meta | Code Llama Instruct (13B) | togethercomputer/CodeLlama-13b-Instruct | 8192 |
| Meta | Code Llama Instruct (34B) | togethercomputer/CodeLlama-34b-Instruct | 8192 |
| Meta | Code Llama Instruct (7B) | togethercomputer/CodeLlama-7b-Instruct | 8192 |
| DiscoResearch | DiscoLM Mixtral 8x7b | DiscoResearch/DiscoLM-mixtral-8x7b-v2 | 32768 |
| TII UAE | Falcon Instruct (40B) | togethercomputer/falcon-40b-instruct | 2048 |
| TII UAE | Falcon Instruct (7B) | togethercomputer/falcon-7b-instruct | 2048 |
| Together | GPT-NeoXT-Chat-Base (20B) | togethercomputer/GPT-NeoXT-Chat-Base-20B | 2048 |
| Meta | LLaMA-2 Chat (13B) | togethercomputer/llama-2-13b-chat | 4096 |
| Meta | LLaMA-2 Chat (70B) | togethercomputer/llama-2-70b-chat | 4096 |
| Meta | LLaMA-2 Chat (7B) | togethercomputer/llama-2-7b-chat | 4096 |
| Together | LLaMA-2-7B-32K-Instruct (7B) | togethercomputer/Llama-2-7B-32K-Instruct | 32768 |
| MistralAI | Mistral (7B) Instruct v0.1 | mistralai/Mistral-7B-Instruct-v0.1 | 4096 |
| MistralAI | Mistral (7B) Instruct v0.2 | mistralai/Mistral-7B-Instruct-v0.2 | 32768 |
| MistralAI | Mixtral-8x7B Instruct | mistralai/Mixtral-8x7B-Instruct-v0.1 | 32768 |
| Gryphe | MythoMax-L2 (13B) | Gryphe/MythoMax-L2-13b | 4096 |
| NousResearch | Nous Hermes LLaMA-2 (7B) | NousResearch/Nous-Hermes-llama-2-7b | 4096 |
| NousResearch | Nous Hermes Llama-2 (13B) | NousResearch/Nous-Hermes-Llama2-13b | 4096 |
| NousResearch | Nous Hermes Llama-2 (70B) | NousResearch/Nous-Hermes-Llama2-70b | 4096 |
| NousResearch | Nous Hermes-2 Yi (34B) | NousResearch/Nous-Hermes-2-Yi-34B | 4096 |
| NousResearch | Nous Capybara v1.9 (7B) | NousResearch/Nous-Capybara-7B-V1p9 | 8192 |
| OpenChat | OpenChat 3.5 1210 (7B) | openchat/openchat-3.5-1210 | 8192 |
| teknium | OpenHermes-2-Mistral (7B) | teknium/OpenHermes-2-Mistral-7B | 4096 |
| teknium | OpenHermes-2.5-Mistral (7B) | teknium/OpenHermes-2p5-Mistral-7B | 4096 |
| OpenOrca | OpenOrca Mistral (7B) 8K | Open-Orca/Mistral-7B-OpenOrca | 8192 |
| garage-bAInd | Platypus2 Instruct (70B) | garage-bAInd/Platypus2-70B-instruct | 4096 |
| Together | Pythia-Chat-Base (7B) | togethercomputer/Pythia-Chat-Base-7B-v0.16 | 2048 |
| Qwen | Qwen-Chat (7B) | togethercomputer/Qwen-7B-Chat | 8192 |
| Together | RedPajama-INCITE Chat (3B) | togethercomputer/RedPajama-INCITE-Chat-3B-v1 | 2048 |
| Together | RedPajama-INCITE Chat (7B) | togethercomputer/RedPajama-INCITE-7B-Chat | 2048 |
| Upstage | SOLAR v0 (70B) | upstage/SOLAR-0-70b-16bit | 4096 |
| Together | StripedHyena Nous (7B) | togethercomputer/StripedHyena-Nous-7B | 32768 |
| LM Sys | Vicuna v1.5 (7B) | lmsys/vicuna-7b-v1.5 | 4096 |
| LM Sys | Vicuna v1.5 (13B) | lmsys/vicuna-13b-v1.5 | 4096 |
| LM Sys | Vicuna v1.5 16K (13B) | lmsys/vicuna-13b-v1.5-16k | 16384 |
| 01.AI | 01-ai Yi Chat (34B) | zero-one-ai/Yi-34B-Chat | 4096 |

View File

@@ -0,0 +1,41 @@
// Together AI does not provide a simple REST API to list models,
// so we keep a table copied from their documentation
// (https://docs.together.ai/edit/inference-models) and parse it
// into a format that makes sense for us.
// Why such an endpoint does not exist is bizarre, but whatever.
// To run: cd into this directory and run `node parse.mjs`, then
// copy the output into the export in ../models.js.
// Update the date below whenever you re-run this, since TogetherAI
// adds new models over time.
// Last Collected: Jan 10, 2024
import fs from "fs";
function parseChatModels() {
const fixed = {};
const tableString = fs.readFileSync("chat_models.txt", { encoding: "utf-8" });
const rows = tableString.split("\n").slice(2);
rows.forEach((row) => {
const [provider, name, id, maxLength] = row.split("|").slice(1, -1);
const data = {
provider: provider.trim(),
name: name.trim(),
id: id.trim(),
maxLength: Number(maxLength.trim()),
};
fixed[data.id] = {
id: data.id,
organization: data.provider,
name: data.name,
maxLength: data.maxLength,
};
});
fs.writeFileSync("chat_models.json", JSON.stringify(fixed, null, 2), "utf-8");
return fixed;
}
parseChatModels();

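As a worked example, the table row
| Stanford | Alpaca (7B) | togethercomputer/alpaca-7b | 2048 |
splits on "|" into six fields with empty strings at both ends; slice(1, -1) drops those edges, and each remaining field is trimmed into provider, name, id, and maxLength.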
View File

@@ -1,6 +1,7 @@
const path = require("path");
const fs = require("fs");
const { toChunks } = require("../../helpers");
const { v4 } = require("uuid");
class NativeEmbedder {
constructor() {
@@ -14,13 +15,30 @@ class NativeEmbedder {
this.modelPath = path.resolve(this.cacheDir, "Xenova", "all-MiniLM-L6-v2");
// Limit of how many strings we can process in a single pass to stay with resource or network limits
this.maxConcurrentChunks = 50;
this.maxConcurrentChunks = 25;
this.embeddingMaxChunkLength = 1_000;
// Make directory when it does not exist in existing installations
if (!fs.existsSync(this.cacheDir)) fs.mkdirSync(this.cacheDir);
}
#tempfilePath() {
const filename = `${v4()}.tmp`;
const tmpPath = process.env.STORAGE_DIR
? path.resolve(process.env.STORAGE_DIR, "tmp")
: path.resolve(__dirname, `../../../storage/tmp`);
if (!fs.existsSync(tmpPath)) fs.mkdirSync(tmpPath, { recursive: true });
return path.resolve(tmpPath, filename);
}
async #writeToTempfile(filePath, data) {
try {
await fs.promises.appendFile(filePath, data, { encoding: "utf8" });
} catch (e) {
console.error(`Error writing to tempfile: ${e}`);
}
}
async embedderClient() {
if (!fs.existsSync(this.modelPath)) {
console.log(
@@ -61,18 +79,51 @@ class NativeEmbedder {
return result?.[0] || [];
}
// If you are thinking you want to edit this function - you probably don't.
// This process was benchmarked heavily on a t3.small (2GB RAM 1vCPU)
// and without careful memory management for the V8 garbage collector
// this function will likely result in an OOM on any resource-constrained deployment.
// To help manage very large documents we run a concurrent write-log each iteration
// to keep the embedding result out of memory. The `maxConcurrentChunks` is set to 25,
// as 50 seems to overflow no matter what. Given the above, memory use hovers around ~30%
// during a very large document (>100K words) but can spike up to 70% before gc.
// This seems repeatable for all document sizes.
// While this does take a while, it is zero-setup, 100% free, and runs on-instance.
async embedChunks(textChunks = []) {
const Embedder = await this.embedderClient();
const embeddingResults = [];
for (const chunk of toChunks(textChunks, this.maxConcurrentChunks)) {
const output = await Embedder(chunk, {
const tmpFilePath = this.#tempfilePath();
const chunks = toChunks(textChunks, this.maxConcurrentChunks);
const chunkLen = chunks.length;
for (let [idx, chunk] of chunks.entries()) {
if (idx === 0) await this.#writeToTempfile(tmpFilePath, "[");
let data;
let pipeline = await this.embedderClient();
let output = await pipeline(chunk, {
pooling: "mean",
normalize: true,
});
if (output.length === 0) continue;
embeddingResults.push(output.tolist());
if (output.length === 0) {
pipeline = null;
output = null;
data = null;
continue;
}
data = JSON.stringify(output.tolist());
await this.#writeToTempfile(tmpFilePath, data);
console.log(`\x1b[34m[Embedded Chunk ${idx + 1} of ${chunkLen}]\x1b[0m`);
if (chunkLen - 1 !== idx) await this.#writeToTempfile(tmpFilePath, ",");
if (chunkLen - 1 === idx) await this.#writeToTempfile(tmpFilePath, "]");
pipeline = null;
output = null;
data = null;
}
const embeddingResults = JSON.parse(
fs.readFileSync(tmpFilePath, { encoding: "utf-8" })
);
fs.rmSync(tmpFilePath, { force: true });
return embeddingResults.length > 0 ? embeddingResults.flat() : null;
}
}

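Reduced to its essentials, the write-log above streams a JSON array to disk one element at a time so only the current batch stays in memory (a standalone sketch with made-up data, not code from this file):

const fs = require("fs");
async function writeArrayIncrementally(filePath, items) {
  if (items.length === 0) return [];
  await fs.promises.appendFile(filePath, "[");
  for (let i = 0; i < items.length; i++) {
    await fs.promises.appendFile(filePath, JSON.stringify(items[i]));
    // comma between elements, closing bracket after the last one
    await fs.promises.appendFile(filePath, i === items.length - 1 ? "]" : ",");
  }
  return JSON.parse(await fs.promises.readFile(filePath, "utf-8"));
}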
View File

@@ -71,7 +71,7 @@ async function chatWithWorkspace(
return await VALID_COMMANDS[command](workspace, message, uuid, user);
}
const LLMConnector = getLLMProvider();
const LLMConnector = getLLMProvider(workspace?.chatModel);
const VectorDb = getVectorDbClass();
const { safe, reasons = [] } = await LLMConnector.isSafe(message);
if (!safe) {
@@ -91,6 +91,18 @@ async function chatWithWorkspace(
const hasVectorizedSpace = await VectorDb.hasNamespace(workspace.slug);
const embeddingsCount = await VectorDb.namespaceCount(workspace.slug);
if (!hasVectorizedSpace || embeddingsCount === 0) {
if (chatMode === "query") {
return {
id: uuid,
type: "textResponse",
sources: [],
close: true,
error: null,
textResponse:
"There is no relevant information in this workspace to answer your query.",
};
}
// If there are no embeddings - chat like a normal LLM chat interface.
return await emptyEmbeddingChat({
uuid,
@@ -131,6 +143,20 @@ async function chatWithWorkspace(
};
}
// If in query mode and no sources are found, do not
// let the LLM try to hallucinate a response or use general knowledge
if (chatMode === "query" && sources.length === 0) {
return {
id: uuid,
type: "textResponse",
sources: [],
close: true,
error: null,
textResponse:
"There is no relevant information in this workspace to answer your query.",
};
}
// Compress message to ensure prompt passes token limit with room for response
// and build system messages based on inputs and history.
const messages = await LLMConnector.compressMessages(
@@ -145,7 +171,7 @@
// Send the text completion.
const textResponse = await LLMConnector.getChatCompletion(messages, {
temperature: workspace?.openAiTemp ?? 0.7,
temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
});
if (!textResponse) {

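Passing workspace?.chatModel into getLLMProvider is what threads the workspace's model choice into the new modelPreference constructor parameter on each provider above; when no workspace model is set, each provider still falls back to its env var and then its hardcoded default.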
View File

@@ -8,6 +8,7 @@ const {
chatPrompt,
} = require(".");
const VALID_CHAT_MODE = ["chat", "query"];
function writeResponseChunk(response, data) {
response.write(`data: ${JSON.stringify(data)}\n\n`);
return;
@@ -29,7 +30,7 @@ async function streamChatWithWorkspace(
return;
}
const LLMConnector = getLLMProvider();
const LLMConnector = getLLMProvider(workspace?.chatModel);
const VectorDb = getVectorDbClass();
const { safe, reasons = [] } = await LLMConnector.isSafe(message);
if (!safe) {
@@ -50,6 +51,19 @@ async function streamChatWithWorkspace(
const hasVectorizedSpace = await VectorDb.hasNamespace(workspace.slug);
const embeddingsCount = await VectorDb.namespaceCount(workspace.slug);
if (!hasVectorizedSpace || embeddingsCount === 0) {
if (chatMode === "query") {
writeResponseChunk(response, {
id: uuid,
type: "textResponse",
textResponse:
"There is no relevant information in this workspace to answer your query.",
sources: [],
close: true,
error: null,
});
return;
}
// If there are no embeddings - chat like a normal LLM chat interface.
return await streamEmptyEmbeddingChat({
response,
@@ -93,6 +107,21 @@ async function streamChatWithWorkspace(
return;
}
// If in query mode and no sources are found, do not
// let the LLM try to hallucinate a response or use general knowledge
if (chatMode === "query" && sources.length === 0) {
writeResponseChunk(response, {
id: uuid,
type: "textResponse",
textResponse:
"There is no relevant information in this workspace to answer your query.",
sources: [],
close: true,
error: null,
});
return;
}
// Compress message to ensure prompt passes token limit with room for response
// and build system messages based on inputs and history.
const messages = await LLMConnector.compressMessages(
@@ -112,7 +141,7 @@
`\x1b[31m[STREAMING DISABLED]\x1b[0m Streaming is not available for ${LLMConnector.constructor.name}. Will use regular chat method.`
);
completeText = await LLMConnector.getChatCompletion(messages, {
temperature: workspace?.openAiTemp ?? 0.7,
temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
});
writeResponseChunk(response, {
uuid,
@@ -124,7 +153,7 @@
});
} else {
const stream = await LLMConnector.streamGetChatCompletion(messages, {
temperature: workspace?.openAiTemp ?? 0.7,
temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
});
completeText = await handleStreamResponses(response, stream, {
uuid,
@@ -262,6 +291,96 @@ function handleStreamResponses(response, stream, responseProps) {
});
}
if (stream.type === "togetherAiStream") {
return new Promise((resolve) => {
let fullText = "";
let chunk = "";
stream.stream.data.on("data", (data) => {
const lines = data
?.toString()
?.split("\n")
.filter((line) => line.trim() !== "");
for (const line of lines) {
let validJSON = false;
const message = chunk + line.replace(/^data: /, "");
if (message !== "[DONE]") {
// JSON chunk is incomplete and has not ended yet
// so we need to stitch it together. You would think JSON
// chunks would only come complete - but they don't!
try {
JSON.parse(message);
validJSON = true;
} catch {}
if (!validJSON) {
// It is possible that the chunk decoding runs away and the
// message buffer fails to append due to string length limits.
// In that case, abort the chunk and reset so we can continue.
// ref: https://github.com/Mintplex-Labs/anything-llm/issues/416
try {
// `message` already contains the carried-over buffer, so assign
// rather than append to avoid duplicating previously buffered text.
chunk = message;
} catch (e) {
console.error(`Chunk appending error`, e);
chunk = "";
}
continue;
} else {
chunk = "";
}
}
if (message == "[DONE]") {
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
resolve(fullText);
} else {
let finishReason = null;
let token = "";
try {
const json = JSON.parse(message);
token = json?.choices?.[0]?.delta?.content;
finishReason = json?.choices?.[0]?.finish_reason || null;
} catch {
continue;
}
if (token) {
fullText += token;
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: token,
close: false,
error: false,
});
}
if (finishReason !== null) {
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
resolve(fullText);
}
}
}
});
});
}
// If stream is not a regular OpenAI Stream (like if using native model, Ollama, or most LangChain interfaces)
// we can just iterate the stream content instead.
if (!stream.hasOwnProperty("data")) {
@@ -385,6 +504,7 @@ function handleStreamResponses(response, stream, responseProps) {
}
module.exports = {
VALID_CHAT_MODE,
streamChatWithWorkspace,
writeResponseChunk,
};

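The stitching logic above, in isolation: keep a carry buffer, try to parse carry + line, and clear the buffer only once a complete JSON object parses (a simplified sketch of the same idea, not code from this diff):

let carry = "";
function onSseLine(line) {
  const message = carry + line.replace(/^data: /, "");
  if (message === "[DONE]") return { done: true };
  try {
    const json = JSON.parse(message);
    carry = ""; // complete object - clear the buffer
    return { token: json?.choices?.[0]?.delta?.content || "" };
  } catch {
    carry = message; // incomplete object - wait for the next line
    return {};
  }
}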
View File

@@ -35,7 +35,7 @@ async function processDocument(filename = "") {
.then((res) => res)
.catch((e) => {
console.log(e.message);
return { success: false, reason: e.message };
return { success: false, reason: e.message, documents: [] };
});
}
@@ -55,7 +55,7 @@ async function processLink(link = "") {
.then((res) => res)
.catch((e) => {
console.log(e.message);
return { success: false, reason: e.message };
return { success: false, reason: e.message, documents: [] };
});
}

View File

@@ -2,32 +2,6 @@ const fs = require("fs");
const path = require("path");
const { v5: uuidv5 } = require("uuid");
async function collectDocumentData(folderName = null) {
if (!folderName) throw new Error("No docPath provided in request");
const folder =
process.env.NODE_ENV === "development"
? path.resolve(__dirname, `../../storage/documents/${folderName}`)
: path.resolve(process.env.STORAGE_DIR, `documents/${folderName}`);
const dirExists = fs.existsSync(folder);
if (!dirExists)
throw new Error(
`No documents folder for ${folderName} - did you run collector/main.py for this element?`
);
const files = fs.readdirSync(folder);
const fileData = [];
files.forEach((file) => {
if (path.extname(file) === ".json") {
const filePath = path.join(folder, file);
const data = fs.readFileSync(filePath, "utf8");
console.log(`Parsing document: ${file}`);
fileData.push(JSON.parse(data));
}
});
return fileData;
}
// Should take in a folder that is a subfolder of documents
// eg: youtube-subject/video-123.json
async function fileData(filePath = null) {
@@ -35,8 +9,15 @@ async function fileData(filePath = null) {
const fullPath =
process.env.NODE_ENV === "development"
? path.resolve(__dirname, `../../storage/documents/${filePath}`)
: path.resolve(process.env.STORAGE_DIR, `documents/${filePath}`);
? path.resolve(
__dirname,
`../../storage/documents/${normalizePath(filePath)}`
)
: path.resolve(
process.env.STORAGE_DIR,
`documents/${normalizePath(filePath)}`
);
const fileExists = fs.existsSync(fullPath);
if (!fileExists) return null;
@@ -142,11 +123,18 @@ async function storeVectorResult(vectorData = [], filename = null) {
async function purgeSourceDocument(filename = null) {
if (!filename) return;
console.log(`Purging source document of ${filename}.`);
const filePath =
process.env.NODE_ENV === "development"
? path.resolve(__dirname, `../../storage/documents`, filename)
: path.resolve(process.env.STORAGE_DIR, `documents`, filename);
? path.resolve(
__dirname,
`../../storage/documents`,
normalizePath(filename)
)
: path.resolve(
process.env.STORAGE_DIR,
`documents`,
normalizePath(filename)
);
if (!fs.existsSync(filePath)) return;
fs.rmSync(filePath);
@@ -169,12 +157,54 @@ async function purgeVectorCache(filename = null) {
return;
}
// Search for a specific document by its unique name in the entire `documents`
// folder via iteration of all folders and checking if the expected file exists.
async function findDocumentInDocuments(documentName = null) {
if (!documentName) return null;
const documentsFolder =
process.env.NODE_ENV === "development"
? path.resolve(__dirname, `../../storage/documents`)
: path.resolve(process.env.STORAGE_DIR, `documents`);
for (const folder of fs.readdirSync(documentsFolder)) {
const isFolder = fs
.lstatSync(path.join(documentsFolder, folder))
.isDirectory();
if (!isFolder) continue;
const targetFilename = normalizePath(documentName);
const targetFileLocation = path.join(
documentsFolder,
folder,
targetFilename
);
if (!fs.existsSync(targetFileLocation)) continue;
const fileData = fs.readFileSync(targetFileLocation, "utf8");
const cachefilename = `${folder}/${targetFilename}`;
const { pageContent, ...metadata } = JSON.parse(fileData);
return {
name: targetFilename,
type: "file",
...metadata,
cached: await cachedVectorInformation(cachefilename, true),
};
}
return null;
}
function normalizePath(filepath = "") {
return path.normalize(filepath).replace(/^(\.\.(\/|\\|$))+/, "");
}
module.exports = {
findDocumentInDocuments,
cachedVectorInformation,
collectDocumentData,
viewLocalFiles,
purgeSourceDocument,
purgeVectorCache,
storeVectorResult,
fileData,
normalizePath,
};

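normalizePath is the guard this commit threads through every user-supplied file path: path.normalize collapses the path, then the regex strips any leading traversal segments. For example:

const { normalizePath } = require("../utils/files"); // illustrative require path
normalizePath("../../etc/passwd"); // -> "etc/passwd"
normalizePath("custom-documents/doc.json"); // -> unchanged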
View File

@@ -2,6 +2,7 @@ const path = require("path");
const fs = require("fs");
const { getType } = require("mime");
const { User } = require("../../models/user");
const { normalizePath } = require(".");
function fetchPfp(pfpPath) {
if (!fs.existsSync(pfpPath)) {
@@ -32,8 +33,7 @@ async function determinePfpFilepath(id) {
const basePath = process.env.STORAGE_DIR
? path.join(process.env.STORAGE_DIR, "assets/pfp")
: path.join(__dirname, "../../storage/assets/pfp");
const pfpFilepath = path.join(basePath, pfpFilename);
const pfpFilepath = path.join(basePath, normalizePath(pfpFilename));
if (!fs.existsSync(pfpFilepath)) return null;
return pfpFilepath;
}

View File

@@ -1,7 +1,6 @@
const fs = require("fs");
const path = require("path");
const { purgeVectorCache, purgeSourceDocument } = require(".");
const { purgeVectorCache, purgeSourceDocument, normalizePath } = require(".");
const { Document } = require("../../models/documents");
const { Workspace } = require("../../models/workspace");
@@ -22,10 +21,10 @@ async function purgeFolder(folderName) {
? path.resolve(__dirname, `../../storage/documents`)
: path.resolve(process.env.STORAGE_DIR, `documents`);
const folderPath = path.resolve(documentsFolder, folderName);
const folderPath = path.resolve(documentsFolder, normalizePath(folderName));
const filenames = fs
.readdirSync(folderPath)
.map((file) => path.join(folderName, file));
.map((file) => path.join(folderPath, file));
const workspaces = await Workspace.where();
const purgePromises = [];

Some files were not shown because too many files have changed in this diff.