mirror of https://github.com/Mintplex-Labs/anything-llm.git
synced 2024-11-19 04:30:10 +01:00

merge master
commit a6a5084565
7  .vscode/settings.json  (vendored)
@@ -5,6 +5,7 @@
     "AIbitat",
     "allm",
     "anythingllm",
+    "Apipie",
     "Astra",
     "Chartable",
     "cleancss",
@@ -18,6 +19,7 @@
     "elevenlabs",
     "Embeddable",
     "epub",
+    "fireworksai",
     "GROQ",
     "hljs",
     "huggingface",
@@ -40,17 +42,18 @@
     "pagerender",
     "Qdrant",
     "royalblue",
-    "searxng",
     "SearchApi",
+    "searxng",
     "Serper",
     "Serply",
     "streamable",
     "textgenwebui",
     "togetherai",
-    "fireworksai",
     "Unembed",
+    "uuidv",
     "vectordbs",
     "Weaviate",
+    "XAILLM",
     "Zilliz"
   ],
   "eslint.experimental.useFlatConfig": true,

@@ -94,6 +94,8 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
 - [KoboldCPP](https://github.com/LostRuins/koboldcpp)
 - [LiteLLM](https://github.com/BerriAI/litellm)
 - [Text Generation Web UI](https://github.com/oobabooga/text-generation-webui)
+- [Apipie](https://apipie.ai/)
+- [xAI](https://x.ai/)
 
 **Embedder models:**
 
@@ -116,6 +118,7 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
 - [PiperTTSLocal - runs in browser](https://github.com/rhasspy/piper)
 - [OpenAI TTS](https://platform.openai.com/docs/guides/text-to-speech/voice-options)
 - [ElevenLabs](https://elevenlabs.io/)
+- Any OpenAI Compatible TTS service.
 
 **STT (speech-to-text) support:**
 
@@ -16,12 +16,14 @@ const extensions = require("./extensions");
 const { processRawText } = require("./processRawText");
 const { verifyPayloadIntegrity } = require("./middleware/verifyIntegrity");
 const app = express();
+const FILE_LIMIT = "3GB";
 
 app.use(cors({ origin: true }));
 app.use(
-  bodyParser.text(),
-  bodyParser.json(),
+  bodyParser.text({ limit: FILE_LIMIT }),
+  bodyParser.json({ limit: FILE_LIMIT }),
   bodyParser.urlencoded({
+    limit: FILE_LIMIT,
     extended: true,
   })
 );
@@ -33,6 +33,7 @@
     "mime": "^3.0.0",
     "moment": "^2.29.4",
     "node-html-parser": "^6.1.13",
+    "node-xlsx": "^0.24.0",
     "officeparser": "^4.0.5",
     "openai": "4.38.5",
     "pdf-parse": "^1.1.1",
@@ -48,4 +49,4 @@
     "nodemon": "^2.0.22",
     "prettier": "^2.4.1"
   }
 }
@@ -27,7 +27,8 @@ async function scrapeGenericUrl(link, textOnly = false) {
   }
 
   const url = new URL(link);
-  const filename = (url.host + "-" + url.pathname).replace(".", "_");
+  const decodedPathname = decodeURIComponent(url.pathname);
+  const filename = `${url.hostname}${decodedPathname.replace(/\//g, "_")}`;
 
   const data = {
     id: v4(),
113  collector/processSingleFile/convert/asXlsx.js  (new file)
@@ -0,0 +1,113 @@
const { v4 } = require("uuid");
const xlsx = require("node-xlsx").default;
const path = require("path");
const fs = require("fs");
const {
  createdDate,
  trashFile,
  writeToServerDocuments,
} = require("../../utils/files");
const { tokenizeString } = require("../../utils/tokenizer");
const { default: slugify } = require("slugify");

function convertToCSV(data) {
  return data
    .map((row) =>
      row
        .map((cell) => {
          if (cell === null || cell === undefined) return "";
          if (typeof cell === "string" && cell.includes(","))
            return `"${cell}"`;
          return cell;
        })
        .join(",")
    )
    .join("\n");
}

async function asXlsx({ fullFilePath = "", filename = "" }) {
  const documents = [];
  const folderName = slugify(`${path.basename(filename)}-${v4().slice(0, 4)}`, {
    lower: true,
    trim: true,
  });

  const outFolderPath =
    process.env.NODE_ENV === "development"
      ? path.resolve(
          __dirname,
          `../../../server/storage/documents/${folderName}`
        )
      : path.resolve(process.env.STORAGE_DIR, `documents/${folderName}`);

  try {
    const workSheetsFromFile = xlsx.parse(fullFilePath);
    if (!fs.existsSync(outFolderPath))
      fs.mkdirSync(outFolderPath, { recursive: true });

    for (const sheet of workSheetsFromFile) {
      try {
        const { name, data } = sheet;
        const content = convertToCSV(data);

        if (!content?.length) {
          console.warn(`Sheet "${name}" is empty. Skipping.`);
          continue;
        }

        console.log(`-- Processing sheet: ${name} --`);
        const sheetData = {
          id: v4(),
          url: `file://${path.join(outFolderPath, `${slugify(name)}.csv`)}`,
          title: `${filename} - Sheet:${name}`,
          docAuthor: "Unknown",
          description: `Spreadsheet data from sheet: ${name}`,
          docSource: "an xlsx file uploaded by the user.",
          chunkSource: "",
          published: createdDate(fullFilePath),
          wordCount: content.split(/\s+/).length,
          pageContent: content,
          token_count_estimate: tokenizeString(content).length,
        };

        const document = writeToServerDocuments(
          sheetData,
          `sheet-${slugify(name)}`,
          outFolderPath
        );
        documents.push(document);
        console.log(
          `[SUCCESS]: Sheet "${name}" converted & ready for embedding.`
        );
      } catch (err) {
        console.error(`Error processing sheet "${name}":`, err);
        continue;
      }
    }
  } catch (err) {
    console.error("Could not process xlsx file!", err);
    return {
      success: false,
      reason: `Error processing ${filename}: ${err.message}`,
      documents: [],
    };
  } finally {
    trashFile(fullFilePath);
  }

  if (documents.length === 0) {
    console.error(`No valid sheets found in ${filename}.`);
    return {
      success: false,
      reason: `No valid sheets found in ${filename}.`,
      documents: [],
    };
  }

  console.log(
    `[SUCCESS]: ${filename} fully processed. Created ${documents.length} document(s).\n`
  );
  return { success: true, reason: null, documents };
}

module.exports = asXlsx;
@@ -38,7 +38,7 @@ async function processSingleFile(targetFilename, options = {}) {
   };
 
   const fileExtension = path.extname(fullFilePath).toLowerCase();
-  if (!fileExtension) {
+  if (fullFilePath.includes(".") && !fileExtension) {
     return {
       success: false,
       reason: `No file extension found. This file cannot be processed.`,
@@ -11,6 +11,10 @@ const ACCEPTED_MIMES = {
     ".pptx",
   ],
 
+  "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": [
+    ".xlsx",
+  ],
+
   "application/vnd.oasis.opendocument.text": [".odt"],
   "application/vnd.oasis.opendocument.presentation": [".odp"],
 
@@ -41,6 +45,8 @@ const SUPPORTED_FILETYPE_CONVERTERS = {
   ".odt": "./convert/asOfficeMime.js",
   ".odp": "./convert/asOfficeMime.js",
 
+  ".xlsx": "./convert/asXlsx.js",
+
   ".mbox": "./convert/asMbox.js",
 
   ".epub": "./convert/asEPub.js",
@@ -29,20 +29,36 @@ class GitHubRepoLoader {
   }
 
   #validGithubUrl() {
-    const UrlPattern = require("url-pattern");
-    const pattern = new UrlPattern(
-      "https\\://github.com/(:author)/(:project(*))",
-      {
-        // fixes project names with special characters (.github)
-        segmentValueCharset: "a-zA-Z0-9-._~%/+",
-      }
-    );
-    const match = pattern.match(this.repo);
-    if (!match) return false;
-
-    this.author = match.author;
-    this.project = match.project;
-    return true;
+    try {
+      const url = new URL(this.repo);
+
+      // Not a github url at all.
+      if (url.hostname !== "github.com") {
+        console.log(
+          `[Github Loader]: Invalid Github URL provided! Hostname must be 'github.com'. Got ${url.hostname}`
+        );
+        return false;
+      }
+
+      // Assume the url is in the format of github.com/{author}/{project}
+      // Remove the first slash from the pathname so we can split it properly.
+      const [author, project, ..._rest] = url.pathname.slice(1).split("/");
+      if (!author || !project) {
+        console.log(
+          `[Github Loader]: Invalid Github URL provided! URL must be in the format of 'github.com/{author}/{project}'. Got ${url.pathname}`
+        );
+        return false;
+      }
+
+      this.author = author;
+      this.project = project;
+      return true;
+    } catch (e) {
+      console.log(
+        `[Github Loader]: Invalid Github URL provided! Error: ${e.message}`
+      );
+      return false;
+    }
   }
 
   // Ensure the branch provided actually exists
@@ -108,7 +108,8 @@ async function bulkScrapePages(links, outFolderPath) {
   }
 
   const url = new URL(link);
-  const filename = (url.host + "-" + url.pathname).replace(".", "_");
+  const decodedPathname = decodeURIComponent(url.pathname);
+  const filename = `${url.hostname}${decodedPathname.replace(/\//g, "_")}`;
 
   const data = {
     id: v4(),
@@ -1,5 +1,5 @@
 const MimeLib = require("mime");
+const path = require("path");
 class MimeDetector {
   nonTextTypes = ["multipart", "image", "model", "audio", "video"];
   badMimes = [
@@ -44,8 +44,26 @@ class MimeDetector {
     );
   }
 
+  // These are file types that are not detected by the mime library and need to be processed as text files.
+  // You should only add file types that are not detected by the mime library, are parsable as text, and are files
+  // with no extension. Otherwise, their extension should be added to the overrides array.
+  #specialTextFileTypes = ["dockerfile", "jenkinsfile"];
+
+  /**
+   * Returns the MIME type of the file. If the file has no extension found, it will be processed as a text file.
+   * @param {string} filepath
+   * @returns {string}
+   */
   getType(filepath) {
-    return this.lib.getType(filepath);
+    const parsedMime = this.lib.getType(filepath);
+    if (!!parsedMime) return parsedMime;
+
+    // If the mime could not be parsed, it could be a special file type like Dockerfile or Jenkinsfile
+    // which we can reliably process as text files.
+    const baseName = path.basename(filepath)?.toLowerCase();
+    if (this.#specialTextFileTypes.includes(baseName)) return "text/plain";
+
+    return null;
   }
 }
 
@@ -2326,6 +2326,13 @@ node-html-parser@^6.1.13:
     css-select "^5.1.0"
     he "1.2.0"
 
+node-xlsx@^0.24.0:
+  version "0.24.0"
+  resolved "https://registry.yarnpkg.com/node-xlsx/-/node-xlsx-0.24.0.tgz#a6a365acb18ad37c66c2b254b6ebe0c22dc9dc6f"
+  integrity sha512-1olwK48XK9nXZsyH/FCltvGrQYvXXZuxVitxXXv2GIuRm51aBi1+5KwR4rWM4KeO61sFU+00913WLZTD+AcXEg==
+  dependencies:
+    xlsx "https://cdn.sheetjs.com/xlsx-0.20.2/xlsx-0.20.2.tgz"
+
 nodemailer@6.9.13:
   version "6.9.13"
   resolved "https://registry.yarnpkg.com/nodemailer/-/nodemailer-6.9.13.tgz#5b292bf1e92645f4852ca872c56a6ba6c4a3d3d6"
@@ -3528,6 +3535,10 @@ ws@8.14.2:
   resolved "https://registry.yarnpkg.com/ws/-/ws-8.14.2.tgz#6c249a806eb2db7a20d26d51e7709eab7b2e6c7f"
   integrity sha512-wEBG1ftX4jcglPxgFCMJmZ2PLtSbJ2Peg6TmpJFTbe9GZYOQCDPdMYu/Tm0/bGZkw8paZnJY45J4K2PZrLYq8g==
 
+"xlsx@https://cdn.sheetjs.com/xlsx-0.20.2/xlsx-0.20.2.tgz":
+  version "0.20.2"
+  resolved "https://cdn.sheetjs.com/xlsx-0.20.2/xlsx-0.20.2.tgz#0f64eeed3f1a46e64724620c3553f2dbd3cd2d7d"
+
 xml2js@^0.6.2:
   version "0.6.2"
   resolved "https://registry.yarnpkg.com/xml2js/-/xml2js-0.6.2.tgz#dd0b630083aa09c161e25a4d0901e2b2a929b499"
@@ -105,6 +105,14 @@ GID='1000'
 # FIREWORKS_AI_LLM_API_KEY='my-fireworks-ai-key'
 # FIREWORKS_AI_LLM_MODEL_PREF='accounts/fireworks/models/llama-v3p1-8b-instruct'
 
+# LLM_PROVIDER='apipie'
+# APIPIE_LLM_API_KEY='sk-123abc'
+# APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
+
+# LLM_PROVIDER='xai'
+# XAI_LLM_API_KEY='xai-your-api-key-here'
+# XAI_LLM_MODEL_PREF='grok-beta'
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
@@ -215,6 +223,11 @@ GID='1000'
 # TTS_OPEN_AI_KEY=sk-example
 # TTS_OPEN_AI_VOICE_MODEL=nova
 
+# TTS_PROVIDER="generic-openai"
+# TTS_OPEN_AI_COMPATIBLE_KEY=sk-example
+# TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL=nova
+# TTS_OPEN_AI_COMPATIBLE_ENDPOINT="https://api.openai.com/v1"
+
 # TTS_PROVIDER="elevenlabs"
 # TTS_ELEVEN_LABS_KEY=
 # TTS_ELEVEN_LABS_VOICE_MODEL=21m00Tcm4TlvDq8ikWAM # Rachel
@@ -270,4 +283,12 @@ GID='1000'
 # AGENT_SERPLY_API_KEY=
 
 #------ SearXNG ----------- https://github.com/searxng/searxng
 # AGENT_SEARXNG_API_URL=
+
+###########################################
+######## Other Configurations ############
+###########################################
+
+# Disable viewing chat history from the UI and frontend APIs.
+# See https://docs.anythingllm.com/configuration#disable-view-chat-history for more information.
+# DISABLE_VIEW_CHAT_HISTORY=1
@@ -22,7 +22,6 @@ const WorkspaceChat = lazy(() => import("@/pages/WorkspaceChat"));
 const AdminUsers = lazy(() => import("@/pages/Admin/Users"));
 const AdminInvites = lazy(() => import("@/pages/Admin/Invitations"));
 const AdminWorkspaces = lazy(() => import("@/pages/Admin/Workspaces"));
-const AdminSystem = lazy(() => import("@/pages/Admin/System"));
 const AdminLogs = lazy(() => import("@/pages/Admin/Logging"));
 const AdminAgents = lazy(() => import("@/pages/Admin/Agents"));
 const GeneralChats = lazy(() => import("@/pages/GeneralSettings/Chats"));
@@ -168,10 +167,6 @@ export default function App() {
             path="/settings/workspace-chats"
             element={<ManagerRoute Component={GeneralChats} />}
           />
-          <Route
-            path="/settings/system-preferences"
-            element={<ManagerRoute Component={AdminSystem} />}
-          />
           <Route
             path="/settings/invites"
             element={<ManagerRoute Component={AdminInvites} />}
50  frontend/src/components/CanViewChatHistory/index.jsx  (new file)
@@ -0,0 +1,50 @@
import { useEffect, useState } from "react";
import { FullScreenLoader } from "@/components/Preloader";
import System from "@/models/system";
import paths from "@/utils/paths";

/**
 * Protects the view from system set ups who cannot view chat history.
 * If the user cannot view chat history, they are redirected to the home page.
 * @param {React.ReactNode} children
 */
export function CanViewChatHistory({ children }) {
  const { loading, viewable } = useCanViewChatHistory();
  if (loading) return <FullScreenLoader />;
  if (!viewable) {
    window.location.href = paths.home();
    return <FullScreenLoader />;
  }

  return <>{children}</>;
}

/**
 * Provides the `viewable` state to the children.
 * @returns {React.ReactNode}
 */
export function CanViewChatHistoryProvider({ children }) {
  const { loading, viewable } = useCanViewChatHistory();
  if (loading) return null;
  return <>{children({ viewable })}</>;
}

/**
 * Hook that fetches the can view chat history state from local storage or the system settings.
 * @returns {Promise<{viewable: boolean, error: string | null}>}
 */
export function useCanViewChatHistory() {
  const [loading, setLoading] = useState(true);
  const [viewable, setViewable] = useState(false);

  useEffect(() => {
    async function fetchViewable() {
      const { viewable } = await System.fetchCanViewChatHistory();
      setViewable(viewable);
      setLoading(false);
    }
    fetchViewable();
  }, []);

  return { loading, viewable };
}
@@ -36,6 +36,8 @@ export default function VoyageAiOptions({ settings }) {
               "voyage-code-2",
               "voyage-large-2",
               "voyage-2",
+              "voyage-3",
+              "voyage-3-lite",
             ].map((model) => {
               return (
                 <option key={model} value={model}>
101  frontend/src/components/LLMSelection/ApiPieOptions/index.jsx  (new file)
@@ -0,0 +1,101 @@
import System from "@/models/system";
import { useState, useEffect } from "react";

export default function ApiPieLLMOptions({ settings }) {
  return (
    <div className="flex flex-col gap-y-4 mt-1.5">
      <div className="flex gap-[36px]">
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-3">
            APIpie API Key
          </label>
          <input
            type="password"
            name="ApipieLLMApiKey"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
            placeholder="APIpie API Key"
            defaultValue={settings?.ApipieLLMApiKey ? "*".repeat(20) : ""}
            required={true}
            autoComplete="off"
            spellCheck={false}
          />
        </div>
        {!settings?.credentialsOnly && (
          <APIPieModelSelection settings={settings} />
        )}
      </div>
    </div>
  );
}

function APIPieModelSelection({ settings }) {
  const [groupedModels, setGroupedModels] = useState({});
  const [loading, setLoading] = useState(true);

  useEffect(() => {
    async function findCustomModels() {
      setLoading(true);
      const { models } = await System.customModels("apipie");
      if (models?.length > 0) {
        const modelsByOrganization = models.reduce((acc, model) => {
          acc[model.organization] = acc[model.organization] || [];
          acc[model.organization].push(model);
          return acc;
        }, {});

        setGroupedModels(modelsByOrganization);
      }

      setLoading(false);
    }
    findCustomModels();
  }, []);

  if (loading || Object.keys(groupedModels).length === 0) {
    return (
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-3">
          Chat Model Selection
        </label>
        <select
          name="ApipieLLMModelPref"
          disabled={true}
          className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
        >
          <option disabled={true} selected={true}>
            -- loading available models --
          </option>
        </select>
      </div>
    );
  }

  return (
    <div className="flex flex-col w-60">
      <label className="text-white text-sm font-semibold block mb-3">
        Chat Model Selection
      </label>
      <select
        name="ApipieLLMModelPref"
        required={true}
        className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
      >
        {Object.keys(groupedModels)
          .sort()
          .map((organization) => (
            <optgroup key={organization} label={organization}>
              {groupedModels[organization].map((model) => (
                <option
                  key={model.id}
                  value={model.id}
                  selected={settings?.ApipieLLMModelPref === model.id}
                >
                  {model.name}
                </option>
              ))}
            </optgroup>
          ))}
      </select>
    </div>
  );
}
@@ -71,23 +71,6 @@ export default function AzureAiOptions({ settings }) {
         </option>
       </select>
     </div>
-
-    <div className="flex flex-col w-60">
-      <label className="text-white text-sm font-semibold block mb-3">
-        Embedding Deployment Name
-      </label>
-      <input
-        type="text"
-        name="AzureOpenAiEmbeddingModelPref"
-        className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
-        placeholder="Azure OpenAI embedding model deployment name"
-        defaultValue={settings?.AzureOpenAiEmbeddingModelPref}
-        required={true}
-        autoComplete="off"
-        spellCheck={false}
-      />
-    </div>
-    <div className="flex-flex-col w-60"></div>
   </div>
 </div>
 );
114  frontend/src/components/LLMSelection/XAiLLMOptions/index.jsx  (new file)
@@ -0,0 +1,114 @@
import { useState, useEffect } from "react";
import System from "@/models/system";

export default function XAILLMOptions({ settings }) {
  const [inputValue, setInputValue] = useState(settings?.XAIApiKey);
  const [apiKey, setApiKey] = useState(settings?.XAIApiKey);

  return (
    <div className="flex gap-[36px] mt-1.5">
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-3">
          xAI API Key
        </label>
        <input
          type="password"
          name="XAIApiKey"
          className="border-none bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
          placeholder="xAI API Key"
          defaultValue={settings?.XAIApiKey ? "*".repeat(20) : ""}
          required={true}
          autoComplete="off"
          spellCheck={false}
          onChange={(e) => setInputValue(e.target.value)}
          onBlur={() => setApiKey(inputValue)}
        />
      </div>

      {!settings?.credentialsOnly && (
        <XAIModelSelection settings={settings} apiKey={apiKey} />
      )}
    </div>
  );
}

function XAIModelSelection({ apiKey, settings }) {
  const [customModels, setCustomModels] = useState([]);
  const [loading, setLoading] = useState(true);

  useEffect(() => {
    async function findCustomModels() {
      if (!apiKey) {
        setCustomModels([]);
        setLoading(true);
        return;
      }

      try {
        setLoading(true);
        const { models } = await System.customModels("xai", apiKey);
        setCustomModels(models || []);
      } catch (error) {
        console.error("Failed to fetch custom models:", error);
        setCustomModels([]);
      } finally {
        setLoading(false);
      }
    }
    findCustomModels();
  }, [apiKey]);

  if (loading) {
    return (
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-3">
          Chat Model Selection
        </label>
        <select
          name="XAIModelPref"
          disabled={true}
          className="border-none bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
        >
          <option disabled={true} selected={true}>
            --loading available models--
          </option>
        </select>
        <p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
          Enter a valid API key to view all available models for your account.
        </p>
      </div>
    );
  }

  return (
    <div className="flex flex-col w-60">
      <label className="text-white text-sm font-semibold block mb-3">
        Chat Model Selection
      </label>
      <select
        name="XAIModelPref"
        required={true}
        className="border-none bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
      >
        {customModels.length > 0 && (
          <optgroup label="Available models">
            {customModels.map((model) => {
              return (
                <option
                  key={model.id}
                  value={model.id}
                  selected={settings?.XAIModelPref === model.id}
                >
                  {model.id}
                </option>
              );
            })}
          </optgroup>
        )}
      </select>
      <p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
        Select the xAI model you want to use for your conversations.
      </p>
    </div>
  );
}
@@ -31,7 +31,7 @@ export default function FileRow({ item, selected, toggleSelection }) {
           className="shrink-0 text-base font-bold w-4 h-4 mr-[3px]"
           weight="fill"
         />
-        <p className="whitespace-nowrap overflow-hidden text-ellipsis">
+        <p className="whitespace-nowrap overflow-hidden text-ellipsis max-w-[400px]">
           {middleTruncate(item.title, 55)}
         </p>
       </div>
@@ -51,7 +51,7 @@ export default function FolderRow({
           className="shrink-0 text-base font-bold w-4 h-4 mr-[3px]"
           weight="fill"
         />
-        <p className="whitespace-nowrap overflow-show">
+        <p className="whitespace-nowrap overflow-show max-w-[400px]">
           {middleTruncate(item.name, 35)}
         </p>
       </div>
@@ -83,7 +83,7 @@ export default function WorkspaceFileRow({
           className="shrink-0 text-base font-bold w-4 h-4 mr-[3px] ml-1"
           weight="fill"
         />
-        <p className="whitespace-nowrap overflow-hidden text-ellipsis">
+        <p className="whitespace-nowrap overflow-hidden text-ellipsis max-w-[400px]">
           {middleTruncate(item.title, 50)}
         </p>
       </div>
@@ -29,9 +29,7 @@ export default function SettingsButton() {
   return (
     <ToolTipWrapper id="open-settings">
       <Link
-        to={
-          !!user?.role ? paths.settings.system() : paths.settings.appearance()
-        }
+        to={paths.settings.appearance()}
         className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
         aria-label="Settings"
         data-tooltip-id="open-settings"
@@ -149,17 +149,32 @@ function useIsExpanded({
   return { isExpanded, setIsExpanded };
 }
 
+/**
+ * Checks if the child options are visible to the user.
+ * This hides the top level options if the child options are not visible
+ * for either the users permissions or the child options hidden prop is set to true by other means.
+ * If all child options return false for `isVisible` then the parent option will not be visible as well.
+ * @param {object} user - The user object.
+ * @param {array} childOptions - The child options.
+ * @returns {boolean} - True if the child options are visible, false otherwise.
+ */
 function hasVisibleOptions(user = null, childOptions = []) {
   if (!Array.isArray(childOptions) || childOptions?.length === 0) return false;
 
-  function isVisible({ roles = [], user = null, flex = false }) {
+  function isVisible({
+    roles = [],
+    user = null,
+    flex = false,
+    hidden = false,
+  }) {
+    if (hidden) return false;
     if (!flex && !roles.includes(user?.role)) return false;
     if (flex && !!user && !roles.includes(user?.role)) return false;
     return true;
   }
 
   return childOptions.some((opt) =>
-    isVisible({ roles: opt.roles, user, flex: opt.flex })
+    isVisible({ roles: opt.roles, user, flex: opt.flex, hidden: opt.hidden })
   );
 }
 
@@ -21,6 +21,7 @@ import { useTranslation } from "react-i18next";
 import showToast from "@/utils/toast";
 import System from "@/models/system";
 import Option from "./MenuOption";
+import { CanViewChatHistoryProvider } from "../CanViewChatHistory";
 
 export default function SettingsSidebar() {
   const { t } = useTranslation();
@@ -208,156 +209,157 @@ function SupportEmail()
 }
 
 const SidebarOptions = ({ user = null, t }) => (
-  <>
+  <CanViewChatHistoryProvider>
+    {({ viewable: canViewChatHistory }) => (
+      <>
         <Option
           btnText={t("settings.ai-providers")}
           icon={<Gear className="h-5 w-5 flex-shrink-0" />}
           user={user}
           childOptions={[
             {
               btnText: t("settings.llm"),
               href: paths.settings.llmPreference(),
               flex: true,
               roles: ["admin"],
             },
             {
               btnText: t("settings.vector-database"),
               href: paths.settings.vectorDatabase(),
               flex: true,
               roles: ["admin"],
             },
             {
               btnText: t("settings.embedder"),
               href: paths.settings.embedder.modelPreference(),
               flex: true,
               roles: ["admin"],
             },
             {
               btnText: t("settings.text-splitting"),
               href: paths.settings.embedder.chunkingPreference(),
               flex: true,
               roles: ["admin"],
             },
             {
               btnText: t("settings.voice-speech"),
               href: paths.settings.audioPreference(),
               flex: true,
               roles: ["admin"],
             },
             {
               btnText: t("settings.transcription"),
               href: paths.settings.transcriptionPreference(),
               flex: true,
               roles: ["admin"],
             },
           ]}
         />
         <Option
           btnText={t("settings.admin")}
           icon={<UserCircleGear className="h-5 w-5 flex-shrink-0" />}
           user={user}
           childOptions={[
             {
               btnText: t("settings.users"),
               href: paths.settings.users(),
               roles: ["admin", "manager"],
             },
             {
               btnText: t("settings.workspaces"),
               href: paths.settings.workspaces(),
               roles: ["admin", "manager"],
             },
             {
+              hidden: !canViewChatHistory,
               btnText: t("settings.workspace-chats"),
               href: paths.settings.chats(),
               flex: true,
               roles: ["admin", "manager"],
             },
             {
               btnText: t("settings.invites"),
               href: paths.settings.invites(),
               roles: ["admin", "manager"],
             },
-            {
-              btnText: t("settings.system"),
-              href: paths.settings.system(),
-              roles: ["admin", "manager"],
-            },
           ]}
         />
         <Option
           btnText={t("settings.agent-skills")}
           icon={<Robot className="h-5 w-5 flex-shrink-0" />}
           href={paths.settings.agentSkills()}
           user={user}
           flex={true}
           roles={["admin"]}
         />
         <Option
           btnText={t("settings.customization")}
           icon={<PencilSimpleLine className="h-5 w-5 flex-shrink-0" />}
           href={paths.settings.appearance()}
           user={user}
           flex={true}
           roles={["admin", "manager"]}
         />
         <Option
           btnText={t("settings.tools")}
           icon={<Toolbox className="h-5 w-5 flex-shrink-0" />}
           user={user}
           childOptions={[
             {
+              hidden: !canViewChatHistory,
               btnText: t("settings.embed-chats"),
               href: paths.settings.embedChats(),
               flex: true,
               roles: ["admin"],
             },
             {
               btnText: t("settings.embeds"),
               href: paths.settings.embedSetup(),
               flex: true,
               roles: ["admin"],
             },
             {
               btnText: t("settings.event-logs"),
               href: paths.settings.logs(),
               flex: true,
               roles: ["admin"],
             },
             {
               btnText: t("settings.api-keys"),
               href: paths.settings.apiKeys(),
               flex: true,
               roles: ["admin"],
             },
             {
               btnText: t("settings.browser-extension"),
               href: paths.settings.browserExtension(),
               flex: true,
               roles: ["admin", "manager"],
             },
           ]}
         />
         <Option
           btnText={t("settings.security")}
           icon={<Nut className="h-5 w-5 flex-shrink-0" />}
           href={paths.settings.security()}
           user={user}
           flex={true}
           roles={["admin", "manager"]}
           hidden={user?.role}
         />
         <HoldToReveal key="exp_features">
           <Option
             btnText={t("settings.experimental-features")}
             icon={<Flask className="h-5 w-5 flex-shrink-0" />}
             href={paths.settings.experimental()}
             user={user}
             flex={true}
             roles={["admin"]}
           />
         </HoldToReveal>
-  </>
+      </>
+    )}
+  </CanViewChatHistoryProvider>
 );
 
 function HoldToReveal({ children, holdForMs = 3_000 }) {
@@ -0,0 +1,69 @@ (new file)
import React from "react";

export default function OpenAiGenericTextToSpeechOptions({ settings }) {
  return (
    <div className="w-full flex flex-col gap-y-7">
      <div className="flex gap-x-4">
        <div className="flex flex-col w-60">
          <div className="flex justify-between items-center mb-2">
            <label className="text-white text-sm font-semibold">Base URL</label>
          </div>
          <input
            type="url"
            name="TTSOpenAICompatibleEndpoint"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
            placeholder="http://localhost:7851/v1"
            defaultValue={settings?.TTSOpenAICompatibleEndpoint}
            required={false}
            autoComplete="off"
            spellCheck={false}
          />
          <p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
            This should be the base URL of the OpenAI compatible TTS service you
            will generate TTS responses from.
          </p>
        </div>

        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-3">
            API Key
          </label>
          <input
            type="password"
            name="TTSOpenAICompatibleKey"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
            placeholder="API Key"
            defaultValue={settings?.TTSOpenAICompatibleKey ? "*".repeat(20) : ""}
            autoComplete="off"
            spellCheck={false}
          />
          <p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
            Some TTS services require an API key to generate TTS responses -
            this is optional if your service does not require one.
          </p>
        </div>
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-3">
            Voice Model
          </label>
          <input
            type="text"
            name="TTSOpenAICompatibleVoiceModel"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
            placeholder="Your voice model identifier"
            defaultValue={settings?.TTSOpenAICompatibleVoiceModel}
            required={true}
            autoComplete="off"
            spellCheck={false}
          />
          <p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
            Most TTS services will have several voice models available, this is
            the identifier for the voice model you want to use.
          </p>
        </div>
      </div>
    </div>
  );
}
@@ -135,7 +135,7 @@ export default function AccountModal({ user, hideModal }) {
             autoComplete="off"
           />
           <p className="mt-2 text-xs text-white/60">
-            Username must be only contain lowercase letters, numbers,
+            Username must only contain lowercase letters, numbers,
             underscores, and hyphens with no spaces
           </p>
         </div>
@@ -23,6 +23,7 @@ export default function TTSMessage({ slug, chatId, message }) {
 
   switch (provider) {
     case "openai":
+    case "generic-openai":
     case "elevenlabs":
       return <AsyncTTSMessage slug={slug} chatId={chatId} />;
     case "piper_local":
@@ -81,11 +81,13 @@ const HistoricalMessage = ({
         <div className="flex flex-col items-center">
           <ProfileImage role={role} workspace={workspace} />
           <div className="mt-1 -mb-10">
-            <TTSMessage
-              slug={workspace?.slug}
-              chatId={chatId}
-              message={message}
-            />
+            {role === "assistant" && (
+              <TTSMessage
+                slug={workspace?.slug}
+                chatId={chatId}
+                message={message}
+              />
+            )}
           </div>
         </div>
         {isEditing ? (
@@ -30,7 +30,7 @@ export function DnDFileUploaderProvider({ workspace, children }) {
   const { user } = useUser();
 
   useEffect(() => {
-    if (!!user && user.role === "default") return false;
+    if (!!user && user.role === "default") return;
     System.checkDocumentProcessorOnline().then((status) => setReady(status));
   }, [user]);
 
@@ -122,9 +122,22 @@ export default function PromptInput({
 
     const pasteText = e.clipboardData.getData("text/plain");
     if (pasteText) {
-      const newPromptInput = promptInput + pasteText.trim();
+      const textarea = textareaRef.current;
+      const start = textarea.selectionStart;
+      const end = textarea.selectionEnd;
+      const newPromptInput =
+        promptInput.substring(0, start) +
+        pasteText +
+        promptInput.substring(end);
       setPromptInput(newPromptInput);
       onChange({ target: { value: newPromptInput } });
+
+      // Set the cursor position after the pasted text
+      // we need to use setTimeout to prevent the cursor from being set to the end of the text
+      setTimeout(() => {
+        textarea.selectionStart = textarea.selectionEnd =
+          start + pasteText.length;
+      }, 0);
     }
     return;
   };
@@ -49,6 +49,7 @@ const PROVIDER_DEFAULT_MODELS = {
   textgenwebui: [],
   "generic-openai": [],
   bedrock: [],
+  xai: ["grok-beta"],
 };
 
 // For providers with large model lists (e.g. togetherAi) - we subgroup the options
BIN  frontend/src/media/llmprovider/apipie.png  (new file, binary not shown, 14 KiB)
BIN  frontend/src/media/llmprovider/xai.png  (new file, binary not shown, 14 KiB)
BIN  frontend/src/media/ttsproviders/generic-openai.png  (new file, binary not shown, 29 KiB)
@@ -9,6 +9,7 @@ const System = {
     footerIcons: "anythingllm_footer_links",
     supportEmail: "anythingllm_support_email",
     customAppName: "anythingllm_custom_app_name",
+    canViewChatHistory: "anythingllm_can_view_chat_history",
   },
   ping: async function () {
     return await fetch(`${API_BASE}/ping`)
@@ -675,6 +676,36 @@ const System = {
       return false;
     });
   },
+
+  /**
+   * Fetches the can view chat history state from local storage or the system settings.
+   * Notice: This is an instance setting that cannot be changed via the UI and it is cached
+   * in local storage for 24 hours.
+   * @returns {Promise<{viewable: boolean, error: string | null}>}
+   */
+  fetchCanViewChatHistory: async function () {
+    const cache = window.localStorage.getItem(
+      this.cacheKeys.canViewChatHistory
+    );
+    const { viewable, lastFetched } = cache
+      ? safeJsonParse(cache, { viewable: false, lastFetched: 0 })
+      : { viewable: false, lastFetched: 0 };
+
+    // Since this is an instance setting that cannot be changed via the UI,
+    // we can cache it in local storage for a day and if the admin changes it,
+    // they should instruct the users to clear local storage.
+    if (typeof viewable === "boolean" && Date.now() - lastFetched < 8.64e7)
+      return { viewable, error: null };
+
+    const res = await System.keys();
+    const isViewable = res?.DisableViewChatHistory === false;
+
+    window.localStorage.setItem(
+      this.cacheKeys.canViewChatHistory,
+      JSON.stringify({ viewable: isViewable, lastFetched: Date.now() })
+    );
+    return { viewable: isViewable, error: null };
+  },
   experimentalFeatures: {
     liveSync: LiveDocumentSync,
     agentPlugins: AgentPlugins,
@@ -281,3 +281,38 @@ export function SearXNGOptions({ settings }) {
     </div>
   );
 }
+
+export function TavilySearchOptions({ settings }) {
+  return (
+    <>
+      <p className="text-sm text-white/60 my-2">
+        You can get an API key{" "}
+        <a
+          href="https://tavily.com/"
+          target="_blank"
+          rel="noreferrer"
+          className="text-blue-300 underline"
+        >
+          from Tavily.
+        </a>
+      </p>
+      <div className="flex gap-x-4">
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-3">
+            API Key
+          </label>
+          <input
+            type="password"
+            name="env::AgentTavilyApiKey"
+            className="border-none bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
+            placeholder="Tavily API Key"
+            defaultValue={settings?.AgentTavilyApiKey ? "*".repeat(20) : ""}
+            required={true}
+            autoComplete="off"
+            spellCheck={false}
+          />
+        </div>
+      </div>
+    </>
+  );
+}
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="500" zoomAndPan="magnify" viewBox="0 0 375 374.999991" height="500" preserveAspectRatio="xMidYMid meet" version="1.0"><defs><clipPath id="d0348dc115"><path d="M 109.378906 231.132812 L 146.484375 231.132812 L 146.484375 268.238281 L 109.378906 268.238281 Z M 109.378906 231.132812 " clip-rule="nonzero"/></clipPath><clipPath id="a28b194a7a"><path d="M 127.933594 231.132812 C 117.6875 231.132812 109.378906 239.4375 109.378906 249.6875 C 109.378906 259.933594 117.6875 268.238281 127.933594 268.238281 C 138.179688 268.238281 146.484375 259.933594 146.484375 249.6875 C 146.484375 239.4375 138.179688 231.132812 127.933594 231.132812 Z M 127.933594 231.132812 " clip-rule="nonzero"/></clipPath></defs><path stroke-linecap="round" transform="matrix(0, -2.578223, 2.578223, 0, 113.745458, 254.140061)" fill="none" stroke-linejoin="miter" d="M 5.499573 5.500038 L 79.114962 5.500038 " stroke="#f25022" stroke-width="11" stroke-opacity="1" stroke-miterlimit="4"/><path stroke-linecap="round" transform="matrix(0, -2.578223, 2.578223, 0, 113.745458, 254.140061)" fill="none" stroke-linejoin="round" d="M 59.865692 -10.999336 L 81.864858 5.500038 L 59.865692 21.999412 " stroke="#f25022" stroke-width="11" stroke-opacity="1" stroke-miterlimit="4"/><path stroke-linecap="round" transform="matrix(2.578223, -0.000251357, 0.000251357, 2.578223, 126.828174, 239.987372)" fill="none" stroke-linejoin="miter" d="M 5.500751 5.50068 L 72.398214 5.499627 " stroke="#ffb901" stroke-width="11" stroke-opacity="1" stroke-miterlimit="4"/><path stroke-linecap="round" transform="matrix(2.578223, -0.000251357, 0.000251357, 2.578223, 126.828174, 239.987372)" fill="none" stroke-linejoin="round" d="M 53.149037 -11.000109 L 75.148109 5.499895 L 53.14885 22.000154 " stroke="#ffb901" stroke-width="11" stroke-opacity="1" stroke-miterlimit="4"/><path stroke-linecap="round" transform="matrix(-1.692446, 1.944957, -1.944957, -1.692446, 134.219043, 258.208373)" fill="none" stroke-linejoin="miter" d="M 4.499518 4.49999 L 38.441562 4.500107 " stroke="#04a3ec" stroke-width="9" stroke-opacity="1" stroke-miterlimit="4"/><path stroke-linecap="round" transform="matrix(-1.692446, 1.944957, -1.944957, -1.692446, 134.219043, 258.208373)" fill="none" stroke-linejoin="round" d="M 22.691248 -9.000192 L 40.69038 4.49943 L 22.68978 17.999994 " stroke="#04a3ec" stroke-width="9" stroke-opacity="1" stroke-miterlimit="4"/><g clip-path="url(#d0348dc115)"><g clip-path="url(#a28b194a7a)"><path fill="#32b37f" d="M 109.378906 231.132812 L 146.484375 231.132812 L 146.484375 268.238281 L 109.378906 268.238281 Z M 109.378906 231.132812 " fill-opacity="1" fill-rule="nonzero"/></g></g></svg>
@@ -7,6 +7,7 @@ import SerperDotDevIcon from "./icons/serper.png";
 import BingSearchIcon from "./icons/bing.png";
 import SerplySearchIcon from "./icons/serply.png";
 import SearXNGSearchIcon from "./icons/searxng.png";
+import TavilySearchIcon from "./icons/tavily.svg";
 import {
   CaretUpDown,
   MagnifyingGlass,
@@ -22,6 +23,7 @@ import {
   BingSearchOptions,
   SerplySearchOptions,
   SearXNGOptions,
+  TavilySearchOptions,
 } from "./SearchProviderOptions";

 const SEARCH_PROVIDERS = [
@@ -81,6 +83,14 @@ const SEARCH_PROVIDERS = [
     description:
       "Free, open-source, internet meta-search engine with no tracking.",
   },
+  {
+    name: "Tavily Search",
+    value: "tavily-search",
+    logo: TavilySearchIcon,
+    options: (settings) => <TavilySearchOptions settings={settings} />,
+    description:
+      "Tavily Search API. Offers a free tier with 1000 queries per month.",
+  },
 ];

 export default function AgentWebSearchSelection({
@@ -1,128 +0,0 @@
-import { useEffect, useState } from "react";
-import Sidebar from "@/components/SettingsSidebar";
-import { isMobile } from "react-device-detect";
-import Admin from "@/models/admin";
-import showToast from "@/utils/toast";
-import CTAButton from "@/components/lib/CTAButton";
-
-export default function AdminSystem() {
-  const [saving, setSaving] = useState(false);
-  const [hasChanges, setHasChanges] = useState(false);
-  const [messageLimit, setMessageLimit] = useState({
-    enabled: false,
-    limit: 10,
-  });
-
-  const handleSubmit = async (e) => {
-    e.preventDefault();
-    setSaving(true);
-    await Admin.updateSystemPreferences({
-      limit_user_messages: messageLimit.enabled,
-      message_limit: messageLimit.limit,
-    });
-    setSaving(false);
-    setHasChanges(false);
-    showToast("System preferences updated successfully.", "success");
-  };
-
-  useEffect(() => {
-    async function fetchSettings() {
-      const settings = (await Admin.systemPreferences())?.settings;
-      if (!settings) return;
-      setMessageLimit({
-        enabled: settings.limit_user_messages,
-        limit: settings.message_limit,
-      });
-    }
-    fetchSettings();
-  }, []);
-
-  return (
-    <div className="w-screen h-screen overflow-hidden bg-sidebar flex">
-      <Sidebar />
-      <div
-        style={{ height: isMobile ? "100%" : "calc(100% - 32px)" }}
-        className="relative md:ml-[2px] md:mr-[16px] md:my-[16px] md:rounded-[16px] bg-main-gradient w-full h-full overflow-y-scroll"
-      >
-        <form
-          onSubmit={handleSubmit}
-          onChange={() => setHasChanges(true)}
-          className="flex flex-col w-full px-1 md:pl-6 md:pr-[50px] md:py-6 py-16"
-        >
-          <div className="w-full flex flex-col gap-y-1 pb-6 border-white border-b-2 border-opacity-10">
-            <div className="items-center">
-              <p className="text-lg leading-6 font-bold text-white">
-                System Preferences
-              </p>
-            </div>
-            <p className="text-xs leading-[18px] font-base text-white text-opacity-60">
-              These are the overall settings and configurations of your
-              instance.
-            </p>
-          </div>
-          {hasChanges && (
-            <div className="flex justify-end">
-              <CTAButton onClick={handleSubmit} className="mt-3 mr-0">
-                {saving ? "Saving..." : "Save changes"}
-              </CTAButton>
-            </div>
-          )}
-          <div className="mt-4 mb-8">
-            <div className="flex flex-col gap-y-1">
-              <h2 className="text-base leading-6 font-bold text-white">
-                Limit messages per user per day
-              </h2>
-              <p className="text-xs leading-[18px] font-base text-white/60">
-                Restrict non-admin users to a number of successful queries or
-                chats within a 24 hour window. Enable this to prevent users from
-                running up OpenAI costs.
-              </p>
-              <div className="mt-2">
-                <label className="relative inline-flex cursor-pointer items-center">
-                  <input
-                    type="checkbox"
-                    name="limit_user_messages"
-                    value="yes"
-                    checked={messageLimit.enabled}
-                    onChange={(e) => {
-                      setMessageLimit({
-                        ...messageLimit,
-                        enabled: e.target.checked,
-                      });
-                    }}
-                    className="peer sr-only"
-                  />
-                  <div className="pointer-events-none peer h-6 w-11 rounded-full bg-stone-400 after:absolute after:left-[2px] after:top-[2px] after:h-5 after:w-5 after:rounded-full after:shadow-xl after:border after:border-gray-600 after:bg-white after:box-shadow-md after:transition-all after:content-[''] peer-checked:bg-lime-300 peer-checked:after:translate-x-full peer-checked:after:border-white peer-focus:outline-none peer-focus:ring-4 peer-focus:ring-blue-800"></div>
-                  <span className="ml-3 text-sm font-medium text-gray-900 dark:text-gray-300"></span>
-                </label>
-              </div>
-            </div>
-            {messageLimit.enabled && (
-              <div className="mt-4">
-                <label className="text-white text-sm font-semibold block mb-4">
-                  Message limit per day
-                </label>
-                <div className="relative mt-2">
-                  <input
-                    type="number"
-                    name="message_limit"
-                    onScroll={(e) => e.target.blur()}
-                    onChange={(e) => {
-                      setMessageLimit({
-                        enabled: true,
-                        limit: Number(e?.target?.value || 0),
-                      });
-                    }}
-                    value={messageLimit.limit}
-                    min={1}
-                    className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-60 p-2.5"
-                  />
-                </div>
-              </div>
-            )}
-          </div>
-        </form>
-      </div>
-    </div>
-  );
-}
@@ -2,11 +2,15 @@ import React, { useState } from "react";
 import { X } from "@phosphor-icons/react";
 import Admin from "@/models/admin";
 import { userFromStorage } from "@/utils/request";
-import { RoleHintDisplay } from "..";
+import { MessageLimitInput, RoleHintDisplay } from "..";

 export default function NewUserModal({ closeModal }) {
   const [error, setError] = useState(null);
   const [role, setRole] = useState("default");
+  const [messageLimit, setMessageLimit] = useState({
+    enabled: false,
+    limit: 10,
+  });

   const handleCreate = async (e) => {
     setError(null);
@@ -14,6 +18,8 @@ export default function NewUserModal({ closeModal }) {
     const data = {};
     const form = new FormData(e.target);
     for (var [key, value] of form.entries()) data[key] = value;
+    data.dailyMessageLimit = messageLimit.enabled ? messageLimit.limit : null;
+
     const { user, error } = await Admin.newUser(data);
     if (!!user) window.location.reload();
     setError(error);
@@ -58,13 +64,13 @@ export default function NewUserModal({ closeModal }) {
             pattern="^[a-z0-9_-]+$"
             onInvalid={(e) =>
               e.target.setCustomValidity(
-                "Username must be only contain lowercase letters, numbers, underscores, and hyphens with no spaces"
+                "Username must only contain lowercase letters, numbers, underscores, and hyphens with no spaces"
               )
             }
             onChange={(e) => e.target.setCustomValidity("")}
           />
           <p className="mt-2 text-xs text-white/60">
-            Username must be only contain lowercase letters, numbers,
+            Username must only contain lowercase letters, numbers,
             underscores, and hyphens with no spaces
           </p>
         </div>
@@ -110,6 +116,12 @@ export default function NewUserModal({ closeModal }) {
               </select>
               <RoleHintDisplay role={role} />
             </div>
+            <MessageLimitInput
+              role={role}
+              enabled={messageLimit.enabled}
+              limit={messageLimit.limit}
+              updateState={setMessageLimit}
+            />
             {error && <p className="text-red-400 text-sm">Error: {error}</p>}
             <p className="text-white text-xs md:text-sm">
               After creating a user they will need to login with their initial
@@ -1,11 +1,15 @@
 import React, { useState } from "react";
 import { X } from "@phosphor-icons/react";
 import Admin from "@/models/admin";
-import { RoleHintDisplay } from "../..";
+import { MessageLimitInput, RoleHintDisplay } from "../..";

 export default function EditUserModal({ currentUser, user, closeModal }) {
   const [role, setRole] = useState(user.role);
   const [error, setError] = useState(null);
+  const [messageLimit, setMessageLimit] = useState({
+    enabled: user.dailyMessageLimit !== null,
+    limit: user.dailyMessageLimit || 10,
+  });

   const handleUpdate = async (e) => {
     setError(null);
@@ -16,6 +20,12 @@ export default function EditUserModal({ currentUser, user, closeModal }) {
       if (!value || value === null) continue;
       data[key] = value;
     }
+    if (messageLimit.enabled) {
+      data.dailyMessageLimit = messageLimit.limit;
+    } else {
+      data.dailyMessageLimit = null;
+    }
+
     const { success, error } = await Admin.updateUser(user.id, data);
     if (success) window.location.reload();
     setError(error);
@@ -58,7 +68,7 @@ export default function EditUserModal({ currentUser, user, closeModal }) {
             autoComplete="off"
           />
           <p className="mt-2 text-xs text-white/60">
-            Username must be only contain lowercase letters, numbers,
+            Username must only contain lowercase letters, numbers,
             underscores, and hyphens with no spaces
           </p>
         </div>
@@ -103,6 +113,12 @@ export default function EditUserModal({ currentUser, user, closeModal }) {
               </select>
               <RoleHintDisplay role={role} />
             </div>
+            <MessageLimitInput
+              role={role}
+              enabled={messageLimit.enabled}
+              limit={messageLimit.limit}
+              updateState={setMessageLimit}
+            />
             {error && <p className="text-red-400 text-sm">Error: {error}</p>}
           </div>
         </div>
@@ -135,3 +135,58 @@ export function RoleHintDisplay({ role }) {
     </div>
   );
 }
+
+export function MessageLimitInput({ enabled, limit, updateState, role }) {
+  if (role === "admin") return null;
+  return (
+    <div className="mt-4 mb-8">
+      <div className="flex flex-col gap-y-1">
+        <div className="flex items-center gap-x-2">
+          <h2 className="text-base leading-6 font-bold text-white">
+            Limit messages per day
+          </h2>
+          <label className="relative inline-flex cursor-pointer items-center">
+            <input
+              type="checkbox"
+              checked={enabled}
+              onChange={(e) => {
+                updateState((prev) => ({
+                  ...prev,
+                  enabled: e.target.checked,
+                }));
+              }}
+              className="peer sr-only"
+            />
+            <div className="pointer-events-none peer h-6 w-11 rounded-full bg-stone-400 after:absolute after:left-[2px] after:top-[2px] after:h-5 after:w-5 after:rounded-full after:shadow-xl after:border after:border-gray-600 after:bg-white after:box-shadow-md after:transition-all after:content-[''] peer-checked:bg-lime-300 peer-checked:after:translate-x-full peer-checked:after:border-white peer-focus:outline-none peer-focus:ring-4 peer-focus:ring-blue-800"></div>
+          </label>
+        </div>
+        <p className="text-xs leading-[18px] font-base text-white/60">
+          Restrict this user to a number of successful queries or chats within a
+          24 hour window.
+        </p>
+      </div>
+      {enabled && (
+        <div className="mt-4">
+          <label className="text-white text-sm font-semibold block mb-4">
+            Message limit per day
+          </label>
+          <div className="relative mt-2">
+            <input
+              type="number"
+              onScroll={(e) => e.target.blur()}
+              onChange={(e) => {
+                updateState({
+                  enabled: true,
+                  limit: Number(e?.target?.value || 0),
+                });
+              }}
+              value={limit}
+              min={1}
+              className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-60 p-2.5"
+            />
+          </div>
+        </div>
+      )}
+    </div>
+  );
+}
@@ -8,10 +8,13 @@ import OpenAiLogo from "@/media/llmprovider/openai.png";
 import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
 import ElevenLabsIcon from "@/media/ttsproviders/elevenlabs.png";
 import PiperTTSIcon from "@/media/ttsproviders/piper.png";
+import GenericOpenAiLogo from "@/media/ttsproviders/generic-openai.png";

 import BrowserNative from "@/components/TextToSpeech/BrowserNative";
 import OpenAiTTSOptions from "@/components/TextToSpeech/OpenAiOptions";
 import ElevenLabsTTSOptions from "@/components/TextToSpeech/ElevenLabsOptions";
 import PiperTTSOptions from "@/components/TextToSpeech/PiperTTSOptions";
+import OpenAiGenericTTSOptions from "@/components/TextToSpeech/OpenAiGenericOptions";

 const PROVIDERS = [
   {
@@ -42,6 +45,14 @@ const PROVIDERS = [
     options: (settings) => <PiperTTSOptions settings={settings} />,
     description: "Run TTS models locally in your browser privately.",
   },
+  {
+    name: "OpenAI Compatible",
+    value: "generic-openai",
+    logo: GenericOpenAiLogo,
+    options: (settings) => <OpenAiGenericTTSOptions settings={settings} />,
+    description:
+      "Connect to an OpenAI compatible TTS service running locally or remotely.",
+  },
 ];

 export default function TextToSpeechProvider({ settings }) {
@@ -11,6 +11,7 @@ import { CaretDown, Download, Sparkle, Trash } from "@phosphor-icons/react";
 import { saveAs } from "file-saver";
 import { useTranslation } from "react-i18next";
 import paths from "@/utils/paths";
+import { CanViewChatHistory } from "@/components/CanViewChatHistory";

 const exportOptions = {
   csv: {
@@ -106,7 +107,8 @@ export default function WorkspaceChats() {

   useEffect(() => {
     async function fetchChats() {
-      const { chats: _chats, hasPages = false } = await System.chats(offset);
+      const { chats: _chats = [], hasPages = false } =
+        await System.chats(offset);
       setChats(_chats);
       setCanNext(hasPages);
       setLoading(false);
@@ -115,85 +117,87 @@ export default function WorkspaceChats() {
   }, [offset]);

   return (
-    <div className="w-screen h-screen overflow-hidden bg-sidebar flex">
-      <Sidebar />
-      <div
-        style={{ height: isMobile ? "100%" : "calc(100% - 32px)" }}
-        className="relative md:ml-[2px] md:mr-[16px] md:my-[16px] md:rounded-[16px] bg-main-gradient w-full h-full overflow-y-scroll"
-      >
-        <div className="flex flex-col w-full px-1 md:pl-6 md:pr-[50px] md:py-6 py-16">
-          <div className="w-full flex flex-col gap-y-1 pb-6 border-white border-b-2 border-opacity-10">
-            <div className="flex gap-x-4 items-center">
-              <p className="text-lg leading-6 font-bold text-white">
-                {t("recorded.title")}
-              </p>
-              <div className="relative">
-                <button
-                  ref={openMenuButton}
-                  onClick={toggleMenu}
-                  className="flex items-center gap-x-2 px-4 py-1 rounded-lg bg-primary-button hover:text-white text-xs font-semibold hover:bg-secondary shadow-[0_4px_14px_rgba(0,0,0,0.25)] h-[34px] w-fit"
-                >
-                  <Download size={18} weight="bold" />
-                  {t("recorded.export")}
-                  <CaretDown size={18} weight="bold" />
-                </button>
-                <div
-                  ref={menuRef}
-                  className={`${
-                    showMenu ? "slide-down" : "slide-up hidden"
-                  } z-20 w-fit rounded-lg absolute top-full right-0 bg-secondary mt-2 shadow-md`}
-                >
-                  <div className="py-2">
-                    {Object.entries(exportOptions).map(([key, data]) => (
-                      <button
-                        key={key}
-                        onClick={() => {
-                          handleDumpChats(key);
-                          setShowMenu(false);
-                        }}
-                        className="w-full text-left px-4 py-2 text-white text-sm hover:bg-[#3D4147]"
-                      >
-                        {data.name}
-                      </button>
-                    ))}
-                  </div>
-                </div>
-              </div>
-              {chats.length > 0 && (
-                <>
-                  <button
-                    onClick={handleClearAllChats}
-                    className="flex items-center gap-x-2 px-4 py-1 border hover:border-transparent border-white/40 text-white/40 rounded-lg bg-transparent hover:text-white text-xs font-semibold hover:bg-red-500 shadow-[0_4px_14px_rgba(0,0,0,0.25)] h-[34px] w-fit"
-                  >
-                    <Trash size={18} weight="bold" />
-                    Clear Chats
-                  </button>
-                  <a
-                    href={paths.orderFineTune()}
-                    className="flex items-center gap-x-2 px-4 py-1 border hover:border-transparent border-yellow-300 text-yellow-300/80 rounded-lg bg-transparent hover:text-white text-xs font-semibold hover:bg-yellow-300/75 shadow-[0_4px_14px_rgba(0,0,0,0.25)] h-[34px] w-fit"
-                  >
-                    <Sparkle size={18} weight="bold" />
-                    Order Fine-Tune Model
-                  </a>
-                </>
-              )}
-            </div>
-            <p className="text-xs leading-[18px] font-base text-white text-opacity-60">
-              {t("recorded.description")}
-            </p>
-          </div>
-          <ChatsContainer
-            loading={loading}
-            chats={chats}
-            setChats={setChats}
-            offset={offset}
-            setOffset={setOffset}
-            canNext={canNext}
-            t={t}
-          />
-        </div>
-      </div>
-    </div>
+    <CanViewChatHistory>
+      <div className="w-screen h-screen overflow-hidden bg-sidebar flex">
+        <Sidebar />
+        <div
+          style={{ height: isMobile ? "100%" : "calc(100% - 32px)" }}
+          className="relative md:ml-[2px] md:mr-[16px] md:my-[16px] md:rounded-[16px] bg-main-gradient w-full h-full overflow-y-scroll"
+        >
+          <div className="flex flex-col w-full px-1 md:pl-6 md:pr-[50px] md:py-6 py-16">
+            <div className="w-full flex flex-col gap-y-1 pb-6 border-white border-b-2 border-opacity-10">
+              <div className="flex gap-x-4 items-center">
+                <p className="text-lg leading-6 font-bold text-white">
+                  {t("recorded.title")}
+                </p>
+                <div className="relative">
+                  <button
+                    ref={openMenuButton}
+                    onClick={toggleMenu}
+                    className="flex items-center gap-x-2 px-4 py-1 rounded-lg bg-primary-button hover:text-white text-xs font-semibold hover:bg-secondary shadow-[0_4px_14px_rgba(0,0,0,0.25)] h-[34px] w-fit"
+                  >
+                    <Download size={18} weight="bold" />
+                    {t("recorded.export")}
+                    <CaretDown size={18} weight="bold" />
+                  </button>
+                  <div
+                    ref={menuRef}
+                    className={`${
+                      showMenu ? "slide-down" : "slide-up hidden"
+                    } z-20 w-fit rounded-lg absolute top-full right-0 bg-secondary mt-2 shadow-md`}
+                  >
+                    <div className="py-2">
+                      {Object.entries(exportOptions).map(([key, data]) => (
+                        <button
+                          key={key}
+                          onClick={() => {
+                            handleDumpChats(key);
+                            setShowMenu(false);
+                          }}
+                          className="w-full text-left px-4 py-2 text-white text-sm hover:bg-[#3D4147]"
+                        >
+                          {data.name}
+                        </button>
+                      ))}
+                    </div>
+                  </div>
+                </div>
+                {chats.length > 0 && (
+                  <>
+                    <button
+                      onClick={handleClearAllChats}
+                      className="flex items-center gap-x-2 px-4 py-1 border hover:border-transparent border-white/40 text-white/40 rounded-lg bg-transparent hover:text-white text-xs font-semibold hover:bg-red-500 shadow-[0_4px_14px_rgba(0,0,0,0.25)] h-[34px] w-fit"
+                    >
+                      <Trash size={18} weight="bold" />
+                      Clear Chats
+                    </button>
+                    <a
+                      href={paths.orderFineTune()}
+                      className="flex items-center gap-x-2 px-4 py-1 border hover:border-transparent border-yellow-300 text-yellow-300/80 rounded-lg bg-transparent hover:text-white text-xs font-semibold hover:bg-yellow-300/75 shadow-[0_4px_14px_rgba(0,0,0,0.25)] h-[34px] w-fit"
+                    >
+                      <Sparkle size={18} weight="bold" />
+                      Order Fine-Tune Model
+                    </a>
+                  </>
+                )}
+              </div>
+              <p className="text-xs leading-[18px] font-base text-white text-opacity-60">
+                {t("recorded.description")}
+              </p>
+            </div>
+            <ChatsContainer
+              loading={loading}
+              chats={chats}
+              setChats={setChats}
+              offset={offset}
+              setOffset={setOffset}
+              canNext={canNext}
+              t={t}
+            />
+          </div>
+        </div>
+      </div>
+    </CanViewChatHistory>
   );
 }
@@ -11,6 +11,7 @@ import { CaretDown, Download } from "@phosphor-icons/react";
 import showToast from "@/utils/toast";
 import { saveAs } from "file-saver";
 import System from "@/models/system";
+import { CanViewChatHistory } from "@/components/CanViewChatHistory";

 const exportOptions = {
   csv: {
@@ -88,59 +89,61 @@ export default function EmbedChats() {
   }, []);

   return (
-    <div className="w-screen h-screen overflow-hidden bg-sidebar flex">
-      <Sidebar />
-      <div
-        style={{ height: isMobile ? "100%" : "calc(100% - 32px)" }}
-        className="relative md:ml-[2px] md:mr-[16px] md:my-[16px] md:rounded-[16px] bg-main-gradient w-full h-full overflow-y-scroll"
-      >
-        <div className="flex flex-col w-full px-1 md:pl-6 md:pr-[86px] md:py-6 py-16">
-          <div className="w-full flex flex-col gap-y-1 pb-6 border-white border-b-2 border-opacity-10">
-            <div className="flex gap-x-4 items-center">
-              <p className="text-lg leading-6 font-bold text-white">
-                {t("embed-chats.title")}
-              </p>
-              <div className="relative">
-                <button
-                  ref={openMenuButton}
-                  onClick={toggleMenu}
-                  className="flex items-center gap-x-2 px-4 py-1 rounded-lg bg-primary-button hover:text-white text-xs font-semibold hover:bg-secondary shadow-[0_4px_14px_rgba(0,0,0,0.25)] h-[34px] w-fit"
-                >
-                  <Download size={18} weight="bold" />
-                  {t("embed-chats.export")}
-                  <CaretDown size={18} weight="bold" />
-                </button>
-                <div
-                  ref={menuRef}
-                  className={`${
-                    showMenu ? "slide-down" : "slide-up hidden"
-                  } z-20 w-fit rounded-lg absolute top-full right-0 bg-secondary mt-2 shadow-md`}
-                >
-                  <div className="py-2">
-                    {Object.entries(exportOptions).map(([key, data]) => (
-                      <button
-                        key={key}
-                        onClick={() => {
-                          handleDumpChats(key);
-                          setShowMenu(false);
-                        }}
-                        className="w-full text-left px-4 py-2 text-white text-sm hover:bg-[#3D4147]"
-                      >
-                        {data.name}
-                      </button>
-                    ))}
-                  </div>
-                </div>
-              </div>
-            </div>
-            <p className="text-xs leading-[18px] font-base text-white text-opacity-60">
-              {t("embed-chats.description")}
-            </p>
-          </div>
-          <ChatsContainer />
-        </div>
-      </div>
-    </div>
+    <CanViewChatHistory>
+      <div className="w-screen h-screen overflow-hidden bg-sidebar flex">
+        <Sidebar />
+        <div
+          style={{ height: isMobile ? "100%" : "calc(100% - 32px)" }}
+          className="relative md:ml-[2px] md:mr-[16px] md:my-[16px] md:rounded-[16px] bg-main-gradient w-full h-full overflow-y-scroll"
+        >
+          <div className="flex flex-col w-full px-1 md:pl-6 md:pr-[86px] md:py-6 py-16">
+            <div className="w-full flex flex-col gap-y-1 pb-6 border-white border-b-2 border-opacity-10">
+              <div className="flex gap-x-4 items-center">
+                <p className="text-lg leading-6 font-bold text-white">
+                  {t("embed-chats.title")}
+                </p>
+                <div className="relative">
+                  <button
+                    ref={openMenuButton}
+                    onClick={toggleMenu}
+                    className="flex items-center gap-x-2 px-4 py-1 rounded-lg bg-primary-button hover:text-white text-xs font-semibold hover:bg-secondary shadow-[0_4px_14px_rgba(0,0,0,0.25)] h-[34px] w-fit"
+                  >
+                    <Download size={18} weight="bold" />
+                    {t("embed-chats.export")}
+                    <CaretDown size={18} weight="bold" />
+                  </button>
+                  <div
+                    ref={menuRef}
+                    className={`${
+                      showMenu ? "slide-down" : "slide-up hidden"
+                    } z-20 w-fit rounded-lg absolute top-full right-0 bg-secondary mt-2 shadow-md`}
+                  >
+                    <div className="py-2">
+                      {Object.entries(exportOptions).map(([key, data]) => (
+                        <button
+                          key={key}
+                          onClick={() => {
+                            handleDumpChats(key);
+                            setShowMenu(false);
+                          }}
+                          className="w-full text-left px-4 py-2 text-white text-sm hover:bg-[#3D4147]"
+                        >
+                          {data.name}
+                        </button>
+                      ))}
+                    </div>
+                  </div>
+                </div>
+              </div>
+              <p className="text-xs leading-[18px] font-base text-white text-opacity-60">
+                {t("embed-chats.description")}
+              </p>
+            </div>
+            <ChatsContainer />
+          </div>
+        </div>
+      </div>
+    </CanViewChatHistory>
   );
 }
@@ -25,6 +25,8 @@ import CohereLogo from "@/media/llmprovider/cohere.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
 import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
+import APIPieLogo from "@/media/llmprovider/apipie.png";
+import XAILogo from "@/media/llmprovider/xai.png";

 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -48,6 +50,8 @@ import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
 import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
 import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
+import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
+import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";

 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -219,6 +223,27 @@ export const AVAILABLE_LLM_PROVIDERS = [
     description: "Run DeepSeek's powerful LLMs.",
     requiredConfig: ["DeepSeekApiKey"],
   },
+  {
+    name: "AWS Bedrock",
+    value: "bedrock",
+    logo: AWSBedrockLogo,
+    options: (settings) => <AWSBedrockLLMOptions settings={settings} />,
+    description: "Run powerful foundation models privately with AWS Bedrock.",
+    requiredConfig: [
+      "AwsBedrockLLMAccessKeyId",
+      "AwsBedrockLLMAccessKey",
+      "AwsBedrockLLMRegion",
+      "AwsBedrockLLMModel",
+    ],
+  },
+  {
+    name: "APIpie",
+    value: "apipie",
+    logo: APIPieLogo,
+    options: (settings) => <ApiPieLLMOptions settings={settings} />,
+    description: "A unified API of AI services from leading providers",
+    requiredConfig: ["ApipieLLMApiKey", "ApipieLLMModelPref"],
+  },
   {
     name: "Generic OpenAI",
     value: "generic-openai",
@@ -243,17 +268,12 @@ export const AVAILABLE_LLM_PROVIDERS = [
   //   requiredConfig: [],
   // },
   {
-    name: "AWS Bedrock",
-    value: "bedrock",
-    logo: AWSBedrockLogo,
-    options: (settings) => <AWSBedrockLLMOptions settings={settings} />,
-    description: "Run powerful foundation models privately with AWS Bedrock.",
-    requiredConfig: [
-      "AwsBedrockLLMAccessKeyId",
-      "AwsBedrockLLMAccessKey",
-      "AwsBedrockLLMRegion",
-      "AwsBedrockLLMModel",
-    ],
+    name: "xAI",
+    value: "xai",
+    logo: XAILogo,
+    options: (settings) => <XAILLMOptions settings={settings} />,
+    description: "Run xAI's powerful LLMs like Grok-2 and more.",
+    requiredConfig: ["XAIApiKey", "XAIModelPref"],
   },
 ];

@@ -21,6 +21,8 @@ import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
 import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
+import APIPieLogo from "@/media/llmprovider/apipie.png";
+import XAILogo from "@/media/llmprovider/xai.png";

 import CohereLogo from "@/media/llmprovider/cohere.png";
 import ZillizLogo from "@/media/vectordbs/zilliz.png";
@@ -202,6 +204,20 @@ export const LLM_SELECTION_PRIVACY = {
     description: ["Your model and chat contents are visible to DeepSeek"],
     logo: DeepSeekLogo,
   },
+  apipie: {
+    name: "APIpie.AI",
+    description: [
+      "Your model and chat contents are visible to APIpie in accordance with their terms of service.",
+    ],
+    logo: APIPieLogo,
+  },
+  xai: {
+    name: "xAI",
+    description: [
+      "Your model and chat contents are visible to xAI in accordance with their terms of service.",
+    ],
+    logo: XAILogo,
+  },
 };

 export const VECTOR_DB_PRIVACY = {
@@ -20,6 +20,8 @@ import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
 import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
+import APIPieLogo from "@/media/llmprovider/apipie.png";
+import XAILogo from "@/media/llmprovider/xai.png";

 import CohereLogo from "@/media/llmprovider/cohere.png";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -43,6 +45,8 @@ import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
 import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
 import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
+import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
+import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";

 import LLMItem from "@/components/LLMSelection/LLMItem";
 import System from "@/models/system";
@@ -193,6 +197,13 @@ const LLMS = [
     options: (settings) => <DeepSeekOptions settings={settings} />,
     description: "Run DeepSeek's powerful LLMs.",
   },
+  {
+    name: "APIpie",
+    value: "apipie",
+    logo: APIPieLogo,
+    options: (settings) => <ApiPieLLMOptions settings={settings} />,
+    description: "A unified API of AI services from leading providers",
+  },
   {
     name: "Generic OpenAI",
     value: "generic-openai",
@@ -216,6 +227,13 @@ const LLMS = [
     options: (settings) => <AWSBedrockLLMOptions settings={settings} />,
     description: "Run powerful foundation models privately with AWS Bedrock.",
   },
+  {
+    name: "xAI",
+    value: "xai",
+    logo: XAILogo,
+    options: (settings) => <XAILLMOptions settings={settings} />,
+    description: "Run xAI's powerful LLMs like Grok-2 and more.",
+  },
 ];

 export default function LLMPreference({
@@ -24,6 +24,9 @@ const ENABLED_PROVIDERS = [
   "bedrock",
   "fireworksai",
   "deepseek",
+  "litellm",
+  "apipie",
+  "xai",
   // TODO: More agent support.
   // "cohere", // Has tool calling and will need to build explicit support
   // "huggingface" // Can be done but already has issues with no-chat templated. Needs to be tested.
@@ -5,14 +5,30 @@ import paths from "@/utils/paths";
 import { useTranslation } from "react-i18next";
 import { Link, useParams } from "react-router-dom";

-// These models do NOT support function calling
+/**
+ * These models do NOT support function calling
+ * or do not support system prompts
+ * and therefore are not supported for agents.
+ * @param {string} provider - The AI provider.
+ * @param {string} model - The model name.
+ * @returns {boolean} Whether the model is supported for agents.
+ */
 function supportedModel(provider, model = "") {
-  if (provider !== "openai") return true;
-  return (
-    ["gpt-3.5-turbo-0301", "gpt-4-turbo-2024-04-09", "gpt-4-turbo"].includes(
-      model
-    ) === false
-  );
+  if (provider === "openai") {
+    return (
+      [
+        "gpt-3.5-turbo-0301",
+        "gpt-4-turbo-2024-04-09",
+        "gpt-4-turbo",
+        "o1-preview",
+        "o1-preview-2024-09-12",
+        "o1-mini",
+        "o1-mini-2024-09-12",
+      ].includes(model) === false
+    );
+  }
+
+  return true;
 }

 export default function AgentModelSelection({
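A quick illustration of the gate above, using hypothetical inputs — only OpenAI models are filtered, and only the ones on the exclusion list are rejected:

```js
// Hypothetical calls against the supportedModel() helper above.
supportedModel("openai", "o1-mini"); // false — o1 models are excluded for agents
supportedModel("openai", "gpt-4o"); // true — not on the exclusion list
supportedModel("anthropic", "claude-3-opus"); // true — non-OpenAI providers pass through
```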
@@ -8,8 +8,10 @@ import Admin from "@/models/admin";
 import * as Skeleton from "react-loading-skeleton";
 import "react-loading-skeleton/dist/skeleton.css";
 import paths from "@/utils/paths";
+import useUser from "@/hooks/useUser";

 export default function WorkspaceAgentConfiguration({ workspace }) {
+  const { user } = useUser();
   const [settings, setSettings] = useState({});
   const [hasChanges, setHasChanges] = useState(false);
   const [saving, setSaving] = useState(false);
@@ -84,21 +86,26 @@ export default function WorkspaceAgentConfiguration({ workspace }) {
         workspace={workspace}
         setHasChanges={setHasChanges}
       />
-      {!hasChanges && (
-        <div className="flex flex-col gap-y-4">
-          <a
-            className="w-fit transition-all duration-300 border border-slate-200 px-5 py-2.5 rounded-lg text-white text-sm items-center flex gap-x-2 hover:bg-slate-200 hover:text-slate-800 focus:ring-gray-800"
-            href={paths.settings.agentSkills()}
-          >
-            Configure Agent Skills
-          </a>
-          <p className="text-white text-opacity-60 text-xs font-medium">
-            Customize and enhance the default agent's capabilities by enabling
-            or disabling specific skills. These settings will be applied
-            across all workspaces.
-          </p>
-        </div>
+      {(!user || user?.role === "admin") && (
+        <>
+          {!hasChanges && (
+            <div className="flex flex-col gap-y-4">
+              <a
+                className="w-fit transition-all duration-300 border border-slate-200 px-5 py-2.5 rounded-lg text-white text-sm items-center flex gap-x-2 hover:bg-slate-200 hover:text-slate-800 focus:ring-gray-800"
+                href={paths.settings.agentSkills()}
+              >
+                Configure Agent Skills
+              </a>
+              <p className="text-white text-opacity-60 text-xs font-medium">
+                Customize and enhance the default agent's capabilities by
+                enabling or disabling specific skills. These settings will be
+                applied across all workspaces.
+              </p>
+            </div>
+          )}
+        </>
       )}

       {hasChanges && (
         <button
           type="submit"
@@ -8,15 +8,18 @@ import { useTranslation } from "react-i18next";
 import { Link } from "react-router-dom";
 import paths from "@/utils/paths";

-// Some providers can only be associated with a single model.
-// In that case there is no selection to be made so we can just move on.
-const NO_MODEL_SELECTION = [
-  "default",
-  "huggingface",
-  "generic-openai",
-  "bedrock",
-];
-const DISABLED_PROVIDERS = ["azure", "native"];
+// Some providers do not support model selection via /models.
+// In that case we allow the user to enter the model name manually and hope they
+// type it correctly.
+const FREE_FORM_LLM_SELECTION = ["bedrock", "azure", "generic-openai"];
+
+// Some providers do not support model selection via /models
+// and only have a fixed single-model they can use.
+const NO_MODEL_SELECTION = ["default", "huggingface"];
+
+// Some providers we just fully disable for ease of use.
+const DISABLED_PROVIDERS = ["native"];

 const LLM_DEFAULT = {
   name: "System default",
   value: "default",
@@ -65,8 +68,8 @@ export default function WorkspaceLLMSelection({
     );
     setFilteredLLMs(filtered);
   }, [LLMS, searchQuery, selectedLLM]);

   const selectedLLMObject = LLMS.find((llm) => llm.value === selectedLLM);

   return (
     <div className="border-b border-white/40 pb-8">
       <div className="flex flex-col">
@@ -155,30 +158,66 @@ export default function WorkspaceLLMSelection({
         </button>
       )}
     </div>
-      {NO_MODEL_SELECTION.includes(selectedLLM) ? (
-        <>
-          {selectedLLM !== "default" && (
-            <div className="w-full h-10 justify-center items-center flex mt-4">
-              <p className="text-sm font-base text-white text-opacity-60 text-center">
-                Multi-model support is not supported for this provider yet.
-                <br />
-                This workspace will use{" "}
-                <Link to={paths.settings.llmPreference()} className="underline">
-                  the model set for the system.
-                </Link>
-              </p>
-            </div>
-          )}
-        </>
-      ) : (
-        <div className="mt-4 flex flex-col gap-y-1">
-          <ChatModelSelection
-            provider={selectedLLM}
-            workspace={workspace}
-            setHasChanges={setHasChanges}
-          />
-        </div>
-      )}
+      <ModelSelector
+        selectedLLM={selectedLLM}
+        workspace={workspace}
+        setHasChanges={setHasChanges}
+      />
+    </div>
+  );
+}
+
+// TODO: Add this to agent selector as well as make generic component.
+function ModelSelector({ selectedLLM, workspace, setHasChanges }) {
+  if (NO_MODEL_SELECTION.includes(selectedLLM)) {
+    if (selectedLLM !== "default") {
+      return (
+        <div className="w-full h-10 justify-center items-center flex mt-4">
+          <p className="text-sm font-base text-white text-opacity-60 text-center">
+            Multi-model support is not supported for this provider yet.
+            <br />
+            This workspace will use{" "}
+            <Link to={paths.settings.llmPreference()} className="underline">
+              the model set for the system.
+            </Link>
+          </p>
+        </div>
+      );
+    }
+    return null;
+  }
+
+  if (FREE_FORM_LLM_SELECTION.includes(selectedLLM)) {
+    return (
+      <FreeFormLLMInput workspace={workspace} setHasChanges={setHasChanges} />
+    );
+  }
+
+  return (
+    <ChatModelSelection
+      provider={selectedLLM}
+      workspace={workspace}
+      setHasChanges={setHasChanges}
+    />
+  );
+}
+
+function FreeFormLLMInput({ workspace, setHasChanges }) {
+  const { t } = useTranslation();
+  return (
+    <div className="mt-4 flex flex-col gap-y-1">
+      <label className="block input-label">{t("chat.model.title")}</label>
+      <p className="text-white text-opacity-60 text-xs font-medium py-1.5">
+        {t("chat.model.description")}
+      </p>
+      <input
+        type="text"
+        name="chatModel"
+        defaultValue={workspace?.chatModel || ""}
+        onChange={() => setHasChanges(true)}
+        className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
+        placeholder="Enter model name exactly as referenced in the API (e.g., gpt-3.5-turbo)"
+      />
     </div>
   );
 }
@@ -80,9 +80,6 @@ export default {
     return `/fine-tuning`;
   },
   settings: {
-    system: () => {
-      return `/settings/system-preferences`;
-    },
     users: () => {
       return `/settings/users`;
     },
@@ -95,6 +95,14 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
 # COHERE_API_KEY=
 # COHERE_MODEL_PREF='command-r'

+# LLM_PROVIDER='apipie'
+# APIPIE_LLM_API_KEY='sk-123abc'
+# APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
+
+# LLM_PROVIDER='xai'
+# XAI_LLM_API_KEY='xai-your-api-key-here'
+# XAI_LLM_MODEL_PREF='grok-beta'
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
@@ -209,6 +217,11 @@ TTS_PROVIDER="native"
 # TTS_ELEVEN_LABS_KEY=
 # TTS_ELEVEN_LABS_VOICE_MODEL=21m00Tcm4TlvDq8ikWAM # Rachel

+# TTS_PROVIDER="generic-openai"
+# TTS_OPEN_AI_COMPATIBLE_KEY=sk-example
+# TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL=nova
+# TTS_OPEN_AI_COMPATIBLE_ENDPOINT="https://api.openai.com/v1"
+
 # CLOUD DEPLOYMENT VARIRABLES ONLY
 # AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting.
 # STORAGE_DIR= # absolute filesystem path with no trailing slash
@@ -259,4 +272,12 @@ TTS_PROVIDER="native"
 # AGENT_SERPLY_API_KEY=

 #------ SearXNG ----------- https://github.com/searxng/searxng
 # AGENT_SEARXNG_API_URL=
+
+###########################################
+######## Other Configurations ############
+###########################################
+
+# Disable viewing chat history from the UI and frontend APIs.
+# See https://docs.anythingllm.com/configuration#disable-view-chat-history for more information.
+# DISABLE_VIEW_CHAT_HISTORY=1
@@ -347,14 +347,6 @@ function adminEndpoints(app) {
           : await SystemSettings.get({ label });

         switch (label) {
-          case "limit_user_messages":
-            requestedSettings[label] = setting?.value === "true";
-            break;
-          case "message_limit":
-            requestedSettings[label] = setting?.value
-              ? Number(setting.value)
-              : 10;
-            break;
           case "footer_data":
             requestedSettings[label] = setting?.value ?? JSON.stringify([]);
             break;
@@ -422,13 +414,6 @@ function adminEndpoints(app) {
       try {
         const embedder = getEmbeddingEngineSelection();
         const settings = {
-          limit_user_messages:
-            (await SystemSettings.get({ label: "limit_user_messages" }))
-              ?.value === "true",
-          message_limit:
-            Number(
-              (await SystemSettings.get({ label: "message_limit" }))?.value
-            ) || 10,
           footer_data:
             (await SystemSettings.get({ label: "footer_data" }))?.value ||
             JSON.stringify([]),
@@ -595,56 +595,6 @@ function apiAdminEndpoints(app) {
     }
   );

-  app.get("/v1/admin/preferences", [validApiKey], async (request, response) => {
-    /*
-    #swagger.tags = ['Admin']
-    #swagger.description = 'Show all multi-user preferences for instance. Methods are disabled until multi user mode is enabled via the UI.'
-    #swagger.responses[200] = {
-      content: {
-        "application/json": {
-          schema: {
-            type: 'object',
-            example: {
-              settings: {
-                limit_user_messages: false,
-                message_limit: 10,
-              }
-            }
-          }
-        }
-      }
-    }
-    #swagger.responses[403] = {
-      schema: {
-        "$ref": "#/definitions/InvalidAPIKey"
-      }
-    }
-    #swagger.responses[401] = {
-      description: "Instance is not in Multi-User mode. Method denied",
-    }
-    */
-    try {
-      if (!multiUserMode(response)) {
-        response.sendStatus(401).end();
-        return;
-      }
-
-      const settings = {
-        limit_user_messages:
-          (await SystemSettings.get({ label: "limit_user_messages" }))
-            ?.value === "true",
-        message_limit:
-          Number(
-            (await SystemSettings.get({ label: "message_limit" }))?.value
-          ) || 10,
-      };
-      response.status(200).json({ settings });
-    } catch (e) {
-      console.error(e);
-      response.sendStatus(500).end();
-    }
-  });
-
   app.post(
     "/v1/admin/preferences",
     [validApiKey],
@@ -658,8 +608,7 @@ function apiAdminEndpoints(app) {
       content: {
         "application/json": {
           example: {
-            limit_user_messages: true,
-            message_limit: 5,
+            support_email: "support@example.com",
           }
         }
       }
@ -31,12 +31,14 @@ function apiWorkspaceThreadEndpoints(app) {
        type: 'string'
      }
      #swagger.requestBody = {
-        description: 'Optional userId associated with the thread',
+        description: 'Optional userId associated with the thread, thread slug and thread name',
        required: false,
        content: {
          "application/json": {
            example: {
-              userId: 1
+              userId: 1,
+              name: 'Name',
+              slug: 'thread-slug'
            }
          }
        }
@ -67,9 +69,9 @@ function apiWorkspaceThreadEndpoints(app) {
      }
      */
      try {
-        const { slug } = request.params;
-        let { userId = null } = reqBody(request);
-        const workspace = await Workspace.get({ slug });
+        const wslug = request.params.slug;
+        let { userId = null, name = null, slug = null } = reqBody(request);
+        const workspace = await Workspace.get({ slug: wslug });

        if (!workspace) {
          response.sendStatus(400).end();
@ -83,7 +85,8 @@ function apiWorkspaceThreadEndpoints(app) {

        const { thread, message } = await WorkspaceThread.new(
          workspace,
-          userId ? Number(userId) : null
+          userId ? Number(userId) : null,
+          { name, slug }
        );

        await Telemetry.sendTelemetry("workspace_thread_created", {
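The thread-creation endpoint above now accepts an optional name and slug alongside userId. A minimal sketch of calling it from JavaScript, assuming a local instance on the default port and a placeholder API key (both the URL and the key are assumptions, not part of this diff), run inside an async function:

const response = await fetch(
  "http://localhost:3001/api/v1/workspace/my-workspace/thread/new",
  {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: "Bearer MY_API_KEY", // hypothetical key
    },
    // name and slug are optional; missing values fall back to "Thread" and a uuid
    body: JSON.stringify({ userId: 1, name: "Name", slug: "thread-slug" }),
  }
);
const { thread } = await response.json();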
@ -1,8 +1,6 @@
 const { v4: uuidv4 } = require("uuid");
 const { reqBody, userFromSession, multiUserMode } = require("../utils/http");
 const { validatedRequest } = require("../utils/middleware/validatedRequest");
-const { WorkspaceChats } = require("../models/workspaceChats");
-const { SystemSettings } = require("../models/systemSettings");
 const { Telemetry } = require("../models/telemetry");
 const { streamChatWithWorkspace } = require("../utils/chats/stream");
 const {
@ -16,6 +14,7 @@ const {
 } = require("../utils/middleware/validWorkspace");
 const { writeResponseChunk } = require("../utils/helpers/chat/responses");
 const { WorkspaceThread } = require("../models/workspaceThread");
+const { User } = require("../models/user");
 const truncate = require("truncate");

 function chatEndpoints(app) {
@ -48,39 +47,16 @@ function chatEndpoints(app) {
      response.setHeader("Connection", "keep-alive");
      response.flushHeaders();

-      if (multiUserMode(response) && user.role !== ROLES.admin) {
-        const limitMessagesSetting = await SystemSettings.get({
-          label: "limit_user_messages",
+      if (multiUserMode(response) && !(await User.canSendChat(user))) {
+        writeResponseChunk(response, {
+          id: uuidv4(),
+          type: "abort",
+          textResponse: null,
+          sources: [],
+          close: true,
+          error: `You have met your maximum 24 hour chat quota of ${user.dailyMessageLimit} chats. Try again later.`,
        });
-        const limitMessages = limitMessagesSetting?.value === "true";
+        return;

-        if (limitMessages) {
-          const messageLimitSetting = await SystemSettings.get({
-            label: "message_limit",
-          });
-          const systemLimit = Number(messageLimitSetting?.value);
-
-          if (!!systemLimit) {
-            const currentChatCount = await WorkspaceChats.count({
-              user_id: user.id,
-              createdAt: {
-                gte: new Date(new Date() - 24 * 60 * 60 * 1000),
-              },
-            });
-
-            if (currentChatCount >= systemLimit) {
-              writeResponseChunk(response, {
-                id: uuidv4(),
-                type: "abort",
-                textResponse: null,
-                sources: [],
-                close: true,
-                error: `You have met your maximum 24 hour chat quota of ${systemLimit} chats set by the instance administrators. Try again later.`,
-              });
-              return;
-            }
-          }
-        }
      }

      await streamChatWithWorkspace(
@ -157,41 +133,16 @@ function chatEndpoints(app) {
      response.setHeader("Connection", "keep-alive");
      response.flushHeaders();

-      if (multiUserMode(response) && user.role !== ROLES.admin) {
-        const limitMessagesSetting = await SystemSettings.get({
-          label: "limit_user_messages",
+      if (multiUserMode(response) && !(await User.canSendChat(user))) {
+        writeResponseChunk(response, {
+          id: uuidv4(),
+          type: "abort",
+          textResponse: null,
+          sources: [],
+          close: true,
+          error: `You have met your maximum 24 hour chat quota of ${user.dailyMessageLimit} chats. Try again later.`,
        });
-        const limitMessages = limitMessagesSetting?.value === "true";
+        return;

-        if (limitMessages) {
-          const messageLimitSetting = await SystemSettings.get({
-            label: "message_limit",
-          });
-          const systemLimit = Number(messageLimitSetting?.value);
-
-          if (!!systemLimit) {
-            // Chat qty includes all threads because any user can freely
-            // create threads and would bypass this rule.
-            const currentChatCount = await WorkspaceChats.count({
-              user_id: user.id,
-              createdAt: {
-                gte: new Date(new Date() - 24 * 60 * 60 * 1000),
-              },
-            });
-
-            if (currentChatCount >= systemLimit) {
-              writeResponseChunk(response, {
-                id: uuidv4(),
-                type: "abort",
-                textResponse: null,
-                sources: [],
-                close: true,
-                error: `You have met your maximum 24 hour chat quota of ${systemLimit} chats set by the instance administrators. Try again later.`,
-              });
-              return;
-            }
-          }
-        }
      }

      await streamChatWithWorkspace(
@ -56,6 +56,7 @@ function embeddedEndpoints(app) {
      writeResponseChunk(response, {
        id: uuidv4(),
        type: "abort",
+        sources: [],
        textResponse: null,
        close: true,
        error: e.message,
@ -72,11 +73,15 @@ function embeddedEndpoints(app) {
    try {
      const { sessionId } = request.params;
      const embed = response.locals.embedConfig;
+      const history = await EmbedChats.forEmbedByUser(
+        embed.id,
+        sessionId,
+        null,
+        null,
+        true
+      );

-      const history = await EmbedChats.forEmbedByUser(embed.id, sessionId);
-      response.status(200).json({
-        history: convertToChatHistory(history),
-      });
+      response.status(200).json({ history: convertToChatHistory(history) });
    } catch (e) {
      console.error(e.message, e);
      response.sendStatus(500).end();
@ -1,7 +1,6 @@
 const { EmbedChats } = require("../models/embedChats");
 const { EmbedConfig } = require("../models/embedConfig");
 const { EventLogs } = require("../models/eventLogs");
-const { Workspace } = require("../models/workspace");
 const { reqBody, userFromSession } = require("../utils/http");
 const { validEmbedConfigId } = require("../utils/middleware/embedMiddleware");
 const {
@ -9,6 +8,9 @@ const {
   ROLES,
 } = require("../utils/middleware/multiUserProtected");
 const { validatedRequest } = require("../utils/middleware/validatedRequest");
+const {
+  chatHistoryViewable,
+} = require("../utils/middleware/chatHistoryViewable");

 function embedManagementEndpoints(app) {
   if (!app) return;
@ -90,7 +92,7 @@ function embedManagementEndpoints(app) {

   app.post(
     "/embed/chats",
-    [validatedRequest, flexUserRoleValid([ROLES.admin])],
+    [chatHistoryViewable, validatedRequest, flexUserRoleValid([ROLES.admin])],
    async (request, response) => {
      try {
        const { offset = 0, limit = 20 } = reqBody(request);
@ -55,6 +55,9 @@ const {
 const { SlashCommandPresets } = require("../models/slashCommandsPresets");
 const { EncryptionManager } = require("../utils/EncryptionManager");
 const { BrowserExtensionApiKey } = require("../models/browserExtensionApiKey");
+const {
+  chatHistoryViewable,
+} = require("../utils/middleware/chatHistoryViewable");

 function systemEndpoints(app) {
   if (!app) return;
@ -495,8 +498,6 @@ function systemEndpoints(app) {

      await SystemSettings._updateSettings({
        multi_user_mode: true,
-        limit_user_messages: false,
-        message_limit: 25,
      });
      await BrowserExtensionApiKey.migrateApiKeysToMultiUser(user.id);

@ -968,7 +969,11 @@ function systemEndpoints(app) {

  app.post(
    "/system/workspace-chats",
-    [validatedRequest, flexUserRoleValid([ROLES.admin, ROLES.manager])],
+    [
+      chatHistoryViewable,
+      validatedRequest,
+      flexUserRoleValid([ROLES.admin, ROLES.manager]),
+    ],
    async (request, response) => {
      try {
        const { offset = 0, limit = 20 } = reqBody(request);
|
|||||||
|
|
||||||
app.get(
|
app.get(
|
||||||
"/system/export-chats",
|
"/system/export-chats",
|
||||||
[validatedRequest, flexUserRoleValid([ROLES.manager, ROLES.admin])],
|
[
|
||||||
|
chatHistoryViewable,
|
||||||
|
validatedRequest,
|
||||||
|
flexUserRoleValid([ROLES.manager, ROLES.admin]),
|
||||||
|
],
|
||||||
async (request, response) => {
|
async (request, response) => {
|
||||||
try {
|
try {
|
||||||
const { type = "jsonl", chatType = "workspace" } = request.query;
|
const { type = "jsonl", chatType = "workspace" } = request.query;
|
||||||
|
@ -1,5 +1,17 @@
+const { safeJsonParse } = require("../utils/http");
 const prisma = require("../utils/prisma");

+/**
+ * @typedef {Object} EmbedChat
+ * @property {number} id
+ * @property {number} embed_id
+ * @property {string} prompt
+ * @property {string} response
+ * @property {string} connection_information
+ * @property {string} session_id
+ * @property {boolean} include
+ */

 const EmbedChats = {
   new: async function ({
     embedId,
@ -25,11 +37,36 @@ const EmbedChats = {
    }
  },

+  /**
+   * Loops through each chat and filters out the sources from the response object.
+   * We do this when returning /history of an embed to the frontend to prevent inadvertent leaking
+   * of private sources the user may not have intended to share with users.
+   * @param {EmbedChat[]} chats
+   * @returns {EmbedChat[]} Returns a new array of chats with the sources filtered out of responses
+   */
+  filterSources: function (chats) {
+    return chats.map((chat) => {
+      const { response, ...rest } = chat;
+      const { sources, ...responseRest } = safeJsonParse(response);
+      return { ...rest, response: JSON.stringify(responseRest) };
+    });
+  },

+  /**
+   * Fetches chats for a given embed and session id.
+   * @param {number} embedId the id of the embed to fetch chats for
+   * @param {string} sessionId the id of the session to fetch chats for
+   * @param {number|null} limit the maximum number of chats to fetch
+   * @param {string|null} orderBy the order to fetch chats in
+   * @param {boolean} filterSources whether to filter out the sources from the response (default: false)
+   * @returns {Promise<EmbedChat[]>} Returns an array of chats for the given embed and session
+   */
  forEmbedByUser: async function (
    embedId = null,
    sessionId = null,
    limit = null,
-    orderBy = null
+    orderBy = null,
+    filterSources = false
  ) {
    if (!embedId || !sessionId) return [];

@ -43,7 +80,7 @@ const EmbedChats = {
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : { orderBy: { id: "asc" } }),
      });
-      return chats;
+      return filterSources ? this.filterSources(chats) : chats;
    } catch (error) {
      console.error(error.message);
      return [];
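To make the new filterSources behavior concrete, here is a small sketch with a made-up chat row; the stored response JSON keeps its text but loses its sources array:

const { EmbedChats } = require("../models/embedChats");

const chats = [
  {
    id: 1,
    session_id: "abc", // hypothetical row
    response: JSON.stringify({
      text: "Hello!",
      sources: [{ title: "private-doc.pdf" }],
    }),
  },
];

const scrubbed = EmbedChats.filterSources(chats);
// scrubbed[0].response === '{"text":"Hello!"}'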
@ -21,8 +21,6 @@ function isNullOrNaN(value) {
 const SystemSettings = {
   protectedFields: ["multi_user_mode"],
   publicFields: [
-    "limit_user_messages",
-    "message_limit",
     "footer_data",
     "support_email",
     "text_splitter_chunk_size",
@ -38,8 +36,6 @@ const SystemSettings = {
    "meta_page_favicon",
  ],
  supportedFields: [
-    "limit_user_messages",
-    "message_limit",
    "logo_filename",
    "telemetry_id",
    "footer_data",
@ -108,6 +104,7 @@ const SystemSettings = {
        "bing-search",
        "serply-engine",
        "searxng-engine",
+        "tavily-search",
      ].includes(update)
    )
      throw new Error("Invalid SERP provider.");
@ -229,12 +226,18 @@ const SystemSettings = {
      TextToSpeechProvider: process.env.TTS_PROVIDER || "native",
      TTSOpenAIKey: !!process.env.TTS_OPEN_AI_KEY,
      TTSOpenAIVoiceModel: process.env.TTS_OPEN_AI_VOICE_MODEL,

      // Eleven Labs TTS
      TTSElevenLabsKey: !!process.env.TTS_ELEVEN_LABS_KEY,
      TTSElevenLabsVoiceModel: process.env.TTS_ELEVEN_LABS_VOICE_MODEL,
      // Piper TTS
      TTSPiperTTSVoiceModel:
        process.env.TTS_PIPER_VOICE_MODEL ?? "en_US-hfc_female-medium",
+      // OpenAI Generic TTS
+      TTSOpenAICompatibleKey: !!process.env.TTS_OPEN_AI_COMPATIBLE_KEY,
+      TTSOpenAICompatibleVoiceModel:
+        process.env.TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL,
+      TTSOpenAICompatibleEndpoint: process.env.TTS_OPEN_AI_COMPATIBLE_ENDPOINT,

      // --------------------------------------------------------
      // Agent Settings & Configs
@ -247,6 +250,14 @@ const SystemSettings = {
      AgentBingSearchApiKey: !!process.env.AGENT_BING_SEARCH_API_KEY || null,
      AgentSerplyApiKey: !!process.env.AGENT_SERPLY_API_KEY || null,
      AgentSearXNGApiUrl: process.env.AGENT_SEARXNG_API_URL || null,
+      AgentTavilyApiKey: !!process.env.AGENT_TAVILY_API_KEY || null,
+
+      // --------------------------------------------------------
+      // Compliance Settings
+      // --------------------------------------------------------
+      // Disable View Chat History for the whole instance.
+      DisableViewChatHistory:
+        "DISABLE_VIEW_CHAT_HISTORY" in process.env || false,
    };
  },

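Note that DisableViewChatHistory is presence-based, not value-based: any value for DISABLE_VIEW_CHAT_HISTORY in the server environment, even an empty string, disables chat history viewing instance-wide. A two-line illustration of the `in` check used above:

process.env.DISABLE_VIEW_CHAT_HISTORY = "";
console.log("DISABLE_VIEW_CHAT_HISTORY" in process.env); // true — the flag counts as set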
@ -515,6 +526,14 @@ const SystemSettings = {
      // DeepSeek API Keys
      DeepSeekApiKey: !!process.env.DEEPSEEK_API_KEY,
      DeepSeekModelPref: process.env.DEEPSEEK_MODEL_PREF,
+
+      // APIPie LLM API Keys
+      ApipieLLMApiKey: !!process.env.APIPIE_LLM_API_KEY,
+      ApipieLLMModelPref: process.env.APIPIE_LLM_MODEL_PREF,
+
+      // xAI LLM API Keys
+      XAIApiKey: !!process.env.XAI_LLM_API_KEY,
+      XAIModelPref: process.env.XAI_LLM_MODEL_PREF,
    };
  },

@ -1,6 +1,17 @@
 const prisma = require("../utils/prisma");
 const { EventLogs } = require("./eventLogs");

+/**
+ * @typedef {Object} User
+ * @property {number} id
+ * @property {string} username
+ * @property {string} password
+ * @property {string} pfpFilename
+ * @property {string} role
+ * @property {boolean} suspended
+ * @property {number|null} dailyMessageLimit
+ */

 const User = {
   usernameRegex: new RegExp(/^[a-z0-9_-]+$/),
   writable: [
@ -10,6 +21,7 @@ const User = {
    "pfpFilename",
    "role",
    "suspended",
+    "dailyMessageLimit",
  ],
  validations: {
    username: (newValue = "") => {
@ -32,12 +44,24 @@ const User = {
      }
      return String(role);
    },
+    dailyMessageLimit: (dailyMessageLimit = null) => {
+      if (dailyMessageLimit === null) return null;
+      const limit = Number(dailyMessageLimit);
+      if (isNaN(limit) || limit < 1) {
+        throw new Error(
+          "Daily message limit must be null or a number greater than or equal to 1"
+        );
+      }
+      return limit;
+    },
  },
  // validations for the above writable fields.
  castColumnValue: function (key, value) {
    switch (key) {
      case "suspended":
        return Number(Boolean(value));
+      case "dailyMessageLimit":
+        return value === null ? null : Number(value);
      default:
        return String(value);
    }
@ -48,7 +72,12 @@ const User = {
    return { ...rest };
  },

-  create: async function ({ username, password, role = "default" }) {
+  create: async function ({
+    username,
+    password,
+    role = "default",
+    dailyMessageLimit = null,
+  }) {
    const passwordCheck = this.checkPasswordComplexity(password);
    if (!passwordCheck.checkedOK) {
      return { user: null, error: passwordCheck.error };
@ -58,7 +87,7 @@ const User = {
    // Do not allow new users to bypass validation
    if (!this.usernameRegex.test(username))
      throw new Error(
-        "Username must be only contain lowercase letters, numbers, underscores, and hyphens with no spaces"
+        "Username must only contain lowercase letters, numbers, underscores, and hyphens with no spaces"
      );

    const bcrypt = require("bcrypt");
@ -68,6 +97,8 @@ const User = {
        username: this.validations.username(username),
        password: hashedPassword,
        role: this.validations.role(role),
+        dailyMessageLimit:
+          this.validations.dailyMessageLimit(dailyMessageLimit),
      },
    });
    return { user: this.filterFields(user), error: null };
@ -135,7 +166,7 @@ const User = {
      return {
        success: false,
        error:
-          "Username must be only contain lowercase letters, numbers, underscores, and hyphens with no spaces",
+          "Username must only contain lowercase letters, numbers, underscores, and hyphens with no spaces",
      };

    const user = await prisma.users.update({
@ -260,6 +291,29 @@ const User = {

    return { checkedOK: true, error: "No error." };
  },
+
+  /**
+   * Check if a user can send a chat based on their daily message limit.
+   * This limit is system wide and not per workspace and only applies to
+   * multi-user mode AND non-admin users.
+   * @param {User} user The user object record.
+   * @returns {Promise<boolean>} True if the user can send a chat, false otherwise.
+   */
+  canSendChat: async function (user) {
+    const { ROLES } = require("../utils/middleware/multiUserProtected");
+    if (!user || user.dailyMessageLimit === null || user.role === ROLES.admin)
+      return true;
+
+    const { WorkspaceChats } = require("./workspaceChats");
+    const currentChatCount = await WorkspaceChats.count({
+      user_id: user.id,
+      createdAt: {
+        gte: new Date(new Date() - 24 * 60 * 60 * 1000), // 24 hours
+      },
+    });
+
+    return currentChatCount < user.dailyMessageLimit;
+  },
 };

 module.exports = { User };
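A rough usage sketch of the new gate, inside an async handler and with a hypothetical user record; admins and users with a null dailyMessageLimit always pass:

const { User } = require("../models/user");

const user = { id: 1, role: "default", dailyMessageLimit: 10 }; // hypothetical record
if (!(await User.canSendChat(user))) {
  console.log(`Daily quota of ${user.dailyMessageLimit} chats reached.`);
}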
@ -1,16 +1,44 @@
 const prisma = require("../utils/prisma");
+const slugifyModule = require("slugify");
 const { v4: uuidv4 } = require("uuid");

 const WorkspaceThread = {
   defaultName: "Thread",
   writable: ["name"],

-  new: async function (workspace, userId = null) {
+  /**
+   * The default Slugify module requires some additional mapping to prevent downstream issues
+   * if the user is able to define a slug externally. We have to block non-escapable URL chars
+   * so that if the slug is rendered it doesn't break the URL or UI when visited.
+   * @param {...any} args - slugify args for npm package.
+   * @returns {string}
+   */
+  slugify: function (...args) {
+    slugifyModule.extend({
+      "+": " plus ",
+      "!": " bang ",
+      "@": " at ",
+      "*": " splat ",
+      ".": " dot ",
+      ":": "",
+      "~": "",
+      "(": "",
+      ")": "",
+      "'": "",
+      '"': "",
+      "|": "",
+    });
+    return slugifyModule(...args);
+  },
+
+  new: async function (workspace, userId = null, data = {}) {
    try {
      const thread = await prisma.workspace_threads.create({
        data: {
-          name: this.defaultName,
-          slug: uuidv4(),
+          name: data.name ? String(data.name) : this.defaultName,
+          slug: data.slug
+            ? this.slugify(data.slug, { lowercase: true })
+            : uuidv4(),
          user_id: userId ? Number(userId) : null,
          workspace_id: workspace.id,
        },
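To illustrate why the extra character map matters, a couple of hypothetical inputs run through the extended slugifier (outputs are approximate, per the mappings above):

const { WorkspaceThread } = require("../models/workspaceThread");

WorkspaceThread.slugify("My Thread!"); // roughly "My-Thread-bang" — "!" becomes " bang "
WorkspaceThread.slugify('a|b"c');      // roughly "abc" — unsafe URL characters are dropped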
@ -0,0 +1,2 @@
+-- AlterTable
+ALTER TABLE "users" ADD COLUMN "dailyMessageLimit" INTEGER;
@ -67,6 +67,7 @@ model users {
  seen_recovery_codes Boolean?  @default(false)
  createdAt           DateTime  @default(now())
  lastUpdatedAt       DateTime  @default(now())
+  dailyMessageLimit   Int?
  workspace_chats     workspace_chats[]
  workspace_users     workspace_users[]
  embed_configs       embed_configs[]
@ -309,4 +310,4 @@ model browser_extension_api_keys {
  user users? @relation(fields: [user_id], references: [id], onDelete: Cascade)

  @@index([user_id])
}
@ -4,8 +4,6 @@ const prisma = new PrismaClient();
 async function main() {
   const settings = [
     { label: "multi_user_mode", value: "false" },
-    { label: "limit_user_messages", value: "false" },
-    { label: "message_limit", value: "25" },
     { label: "logo_filename", value: "anything-llm.png" },
   ];

3
server/storage/models/.gitignore
vendored
@ -1,4 +1,5 @@
 Xenova
 downloaded/*
 !downloaded/.placeholder
 openrouter
+apipie
@ -693,52 +693,6 @@
      }
    },
    "/v1/admin/preferences": {
-      "get": {
-        "tags": [
-          "Admin"
-        ],
-        "description": "Show all multi-user preferences for instance. Methods are disabled until multi user mode is enabled via the UI.",
-        "parameters": [],
-        "responses": {
-          "200": {
-            "description": "OK",
-            "content": {
-              "application/json": {
-                "schema": {
-                  "type": "object",
-                  "example": {
-                    "settings": {
-                      "limit_user_messages": false,
-                      "message_limit": 10
-                    }
-                  }
-                }
-              }
-            }
-          },
-          "401": {
-            "description": "Instance is not in Multi-User mode. Method denied"
-          },
-          "403": {
-            "description": "Forbidden",
-            "content": {
-              "application/json": {
-                "schema": {
-                  "$ref": "#/components/schemas/InvalidAPIKey"
-                }
-              },
-              "application/xml": {
-                "schema": {
-                  "$ref": "#/components/schemas/InvalidAPIKey"
-                }
-              }
-            }
-          },
-          "500": {
-            "description": "Internal Server Error"
-          }
-        }
-      },
      "post": {
        "tags": [
          "Admin"
@ -788,8 +742,7 @@
          "content": {
            "application/json": {
              "example": {
-                "limit_user_messages": true,
-                "message_limit": 5
+                "support_email": "support@example.com"
              }
            }
          }
@ -2438,12 +2391,14 @@
          }
        },
        "requestBody": {
-          "description": "Optional userId associated with the thread",
+          "description": "Optional userId associated with the thread, thread slug and thread name",
          "required": false,
          "content": {
            "application/json": {
              "example": {
-                "userId": 1
+                "userId": 1,
+                "name": "Name",
+                "slug": "thread-slug"
              }
            }
          }
336
server/utils/AiProviders/apipie/index.js
Normal file
@ -0,0 +1,336 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+  handleDefaultStreamResponseV2,
+} = require("../../helpers/chat/responses");
+
+const { v4: uuidv4 } = require("uuid");
+const {
+  writeResponseChunk,
+  clientAbortedHandler,
+} = require("../../helpers/chat/responses");
+
+const fs = require("fs");
+const path = require("path");
+const { safeJsonParse } = require("../../http");
+const cacheFolder = path.resolve(
+  process.env.STORAGE_DIR
+    ? path.resolve(process.env.STORAGE_DIR, "models", "apipie")
+    : path.resolve(__dirname, `../../../storage/models/apipie`)
+);
+
+class ApiPieLLM {
+  constructor(embedder = null, modelPreference = null) {
+    if (!process.env.APIPIE_LLM_API_KEY)
+      throw new Error("No ApiPie LLM API key was set.");
+
+    const { OpenAI: OpenAIApi } = require("openai");
+    this.basePath = "https://apipie.ai/v1";
+    this.openai = new OpenAIApi({
+      baseURL: this.basePath,
+      apiKey: process.env.APIPIE_LLM_API_KEY ?? null,
+    });
+    this.model =
+      modelPreference ||
+      process.env.APIPIE_LLM_MODEL_PREF ||
+      "openrouter/mistral-7b-instruct";
+    this.limits = {
+      history: this.promptWindowLimit() * 0.15,
+      system: this.promptWindowLimit() * 0.15,
+      user: this.promptWindowLimit() * 0.7,
+    };
+
+    this.embedder = embedder ?? new NativeEmbedder();
+    this.defaultTemp = 0.7;
+
+    if (!fs.existsSync(cacheFolder))
+      fs.mkdirSync(cacheFolder, { recursive: true });
+    this.cacheModelPath = path.resolve(cacheFolder, "models.json");
+    this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
+  }
+
+  log(text, ...args) {
+    console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+  }
+
+  // This checks if the .cached_at file has a timestamp that is more than 1Week (in millis)
+  // from the current date. If it is, then we will refetch the API so that all the models are up
+  // to date.
+  #cacheIsStale() {
+    const MAX_STALE = 6.048e8; // 1 Week in MS
+    if (!fs.existsSync(this.cacheAtPath)) return true;
+    const now = Number(new Date());
+    const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
+    return now - timestampMs > MAX_STALE;
+  }
+
+  // This function fetches the models from the ApiPie API and caches them locally.
+  // We do this because the ApiPie API has a lot of models, and we need to get the proper token context window
+  // for each model and this is a constructor property - so we can really only get it if this cache exists.
+  // We used to have this as a chore, but given there is an API to get the info - this makes little sense.
+  // This might slow down the first request, but we need the proper token context window
+  // for each model and this is a constructor property - so we can really only get it if this cache exists.
+  async #syncModels() {
+    if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
+      return false;
+
+    this.log("Model cache is not present or stale. Fetching from ApiPie API.");
+    await fetchApiPieModels();
+    return;
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
+  models() {
+    if (!fs.existsSync(this.cacheModelPath)) return {};
+    return safeJsonParse(
+      fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
+      {}
+    );
+  }
+
+  streamingEnabled() {
+    return "streamGetChatCompletion" in this;
+  }
+
+  static promptWindowLimit(modelName) {
+    const cacheModelPath = path.resolve(cacheFolder, "models.json");
+    const availableModels = fs.existsSync(cacheModelPath)
+      ? safeJsonParse(
+          fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
+          {}
+        )
+      : {};
+    return availableModels[modelName]?.maxLength || 4096;
+  }
+
+  promptWindowLimit() {
+    const availableModels = this.models();
+    return availableModels[this.model]?.maxLength || 4096;
+  }
+
+  async isValidChatCompletionModel(model = "") {
+    await this.#syncModels();
+    const availableModels = this.models();
+    return availableModels.hasOwnProperty(model);
+  }
+
+  /**
+   * Generates appropriate content array for a message + attachments.
+   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
+   * @returns {string|object[]}
+   */
+  #generateContent({ userPrompt, attachments = [] }) {
+    if (!attachments.length) {
+      return userPrompt;
+    }
+
+    const content = [{ type: "text", text: userPrompt }];
+    for (let attachment of attachments) {
+      content.push({
+        type: "image_url",
+        image_url: {
+          url: attachment.contentString,
+          detail: "auto",
+        },
+      });
+    }
+    return content.flat();
+  }
+
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+    attachments = [],
+  }) {
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+    };
+    return [
+      prompt,
+      ...chatHistory,
+      {
+        role: "user",
+        content: this.#generateContent({ userPrompt, attachments }),
+      },
+    ];
+  }
+
+  async getChatCompletion(messages = null, { temperature = 0.7 }) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `ApiPie chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const result = await this.openai.chat.completions
+      .create({
+        model: this.model,
+        messages,
+        temperature,
+      })
+      .catch((e) => {
+        throw new Error(e.message);
+      });
+
+    if (!result.hasOwnProperty("choices") || result.choices.length === 0)
+      return null;
+    return result.choices[0].message.content;
+  }
+
+  // APIPie says it supports streaming, but it does not work across all models and providers.
+  // Notably, it is not working for OpenRouter models at all.
+  // async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+  //   if (!(await this.isValidChatCompletionModel(this.model)))
+  //     throw new Error(
+  //       `ApiPie chat: ${this.model} is not valid for chat completion!`
+  //     );

+  //   const streamRequest = await this.openai.chat.completions.create({
+  //     model: this.model,
+  //     stream: true,
+  //     messages,
+  //     temperature,
+  //   });
+  //   return streamRequest;
+  // }
+
+  handleStream(response, stream, responseProps) {
+    const { uuid = uuidv4(), sources = [] } = responseProps;
+
+    return new Promise(async (resolve) => {
+      let fullText = "";
+
+      // Establish listener to early-abort a streaming response
+      // in case things go sideways or the user does not like the response.
+      // We preserve the generated text but continue as if chat was completed
+      // to preserve previously generated content.
+      const handleAbort = () => clientAbortedHandler(resolve, fullText);
+      response.on("close", handleAbort);
+
+      try {
+        for await (const chunk of stream) {
+          const message = chunk?.choices?.[0];
+          const token = message?.delta?.content;
+
+          if (token) {
+            fullText += token;
+            writeResponseChunk(response, {
+              uuid,
+              sources: [],
+              type: "textResponseChunk",
+              textResponse: token,
+              close: false,
+              error: false,
+            });
+          }
+
+          if (message === undefined || message.finish_reason !== null) {
+            writeResponseChunk(response, {
+              uuid,
+              sources,
+              type: "textResponseChunk",
+              textResponse: "",
+              close: true,
+              error: false,
+            });
+            response.removeListener("close", handleAbort);
+            resolve(fullText);
+          }
+        }
+      } catch (e) {
+        writeResponseChunk(response, {
+          uuid,
+          sources,
+          type: "abort",
+          textResponse: null,
+          close: true,
+          error: e.message,
+        });
+        response.removeListener("close", handleAbort);
+        resolve(fullText);
+      }
+    });
+  }
+
+  // handleStream(response, stream, responseProps) {
+  //   return handleDefaultStreamResponseV2(response, stream, responseProps);
+  // }
+
+  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
+async function fetchApiPieModels(providedApiKey = null) {
+  const apiKey = providedApiKey || process.env.APIPIE_LLM_API_KEY || null;
+  return await fetch(`https://apipie.ai/v1/models`, {
+    method: "GET",
+    headers: {
+      "Content-Type": "application/json",
+      ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}),
+    },
+  })
+    .then((res) => res.json())
+    .then(({ data = [] }) => {
+      const models = {};
+      data.forEach((model) => {
+        models[`${model.provider}/${model.model}`] = {
+          id: `${model.provider}/${model.model}`,
+          name: `${model.provider}/${model.model}`,
+          organization: model.provider,
+          maxLength: model.max_tokens,
+        };
+      });
+
+      // Cache all response information
+      if (!fs.existsSync(cacheFolder))
+        fs.mkdirSync(cacheFolder, { recursive: true });
+      fs.writeFileSync(
+        path.resolve(cacheFolder, "models.json"),
+        JSON.stringify(models),
+        {
+          encoding: "utf-8",
+        }
+      );
+      fs.writeFileSync(
+        path.resolve(cacheFolder, ".cached_at"),
+        String(Number(new Date())),
+        {
+          encoding: "utf-8",
+        }
+      );
+
+      return models;
+    })
+    .catch((e) => {
+      console.error(e);
+      return {};
+    });
+}
+
+module.exports = {
+  ApiPieLLM,
+  fetchApiPieModels,
+};
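A minimal usage sketch of the new provider, assuming APIPIE_LLM_API_KEY is set in the environment and the call runs inside an async function:

const { ApiPieLLM } = require("./server/utils/AiProviders/apipie");

const llm = new ApiPieLLM(null, "openrouter/mistral-7b-instruct");
const messages = llm.constructPrompt({
  systemPrompt: "You are a helpful assistant.",
  userPrompt: "Hello!",
});
const reply = await llm.getChatCompletion(messages, { temperature: 0.7 });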
@ -5,7 +5,7 @@ const {
 } = require("../../helpers/chat/responses");

 class AzureOpenAiLLM {
-  constructor(embedder = null, _modelPreference = null) {
+  constructor(embedder = null, modelPreference = null) {
    const { OpenAIClient, AzureKeyCredential } = require("@azure/openai");
    if (!process.env.AZURE_OPENAI_ENDPOINT)
      throw new Error("No Azure API endpoint was set.");
@ -16,7 +16,7 @@ class AzureOpenAiLLM {
      process.env.AZURE_OPENAI_ENDPOINT,
      new AzureKeyCredential(process.env.AZURE_OPENAI_KEY)
    );
-    this.model = process.env.OPEN_MODEL_PREF;
+    this.model = modelPreference ?? process.env.OPEN_MODEL_PREF;
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
@ -7,6 +7,20 @@ const { NativeEmbedder } = require("../../EmbeddingEngines/native");

 // Docs: https://js.langchain.com/v0.2/docs/integrations/chat/bedrock_converse
 class AWSBedrockLLM {
+  /**
+   * These models do not support system prompts
+   * It is not explicitly stated but it is observed that they do not use the system prompt
+   * in their responses and will crash when a system prompt is provided.
+   * We can add more models to this list as we discover them or new models are added.
+   * We may want to extend this list or make a user-config if using custom bedrock models.
+   */
+  noSystemPromptModels = [
+    "amazon.titan-text-express-v1",
+    "amazon.titan-text-lite-v1",
+    "cohere.command-text-v14",
+    "cohere.command-light-text-v14",
+  ];
+
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID)
      throw new Error("No AWS Bedrock LLM profile id was set.");
@ -32,7 +46,7 @@ class AWSBedrockLLM {
  #bedrockClient({ temperature = 0.7 }) {
    const { ChatBedrockConverse } = require("@langchain/aws");
    return new ChatBedrockConverse({
-      model: process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE,
+      model: this.model,
      region: process.env.AWS_BEDROCK_LLM_REGION,
      credentials: {
        accessKeyId: process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID,
@ -59,6 +73,22 @@ class AWSBedrockLLM {

    for (const chat of chats) {
      if (!roleToMessageMap.hasOwnProperty(chat.role)) continue;
+
+      // When a model does not support system prompts, we need to handle it.
+      // We will add a new message that simulates the system prompt via a user message and AI response.
+      // This will allow the model to respond without crashing but we can still inject context.
+      if (
+        this.noSystemPromptModels.includes(this.model) &&
+        chat.role === "system"
+      ) {
+        this.#log(
+          `Model does not support system prompts! Simulating system prompt via Human/AI message pairs.`
+        );
+        langchainChats.push(new HumanMessage({ content: chat.content }));
+        langchainChats.push(new AIMessage({ content: "Okay." }));
+        continue;
+      }
+
      const MessageClass = roleToMessageMap[chat.role];
      langchainChats.push(new MessageClass({ content: chat.content }));
    }
@ -78,6 +108,10 @@ class AWSBedrockLLM {
    );
  }

+  #log(text, ...args) {
+    console.log(`\x1b[32m[AWSBedrock]\x1b[0m ${text}`, ...args);
+  }
+
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }
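Concretely, for a model in noSystemPromptModels the system message is emulated rather than dropped. A hypothetical history like:

[
  { role: "system", content: "Answer tersely." },
  { role: "user", content: "Hi" },
]

is sent to amazon.titan-text-express-v1 as:

[
  new HumanMessage({ content: "Answer tersely." }),
  new AIMessage({ content: "Okay." }),
  new HumanMessage({ content: "Hi" }),
]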
@ -37,6 +37,10 @@ class GroqLLM {
    );
  }

+  #log(text, ...args) {
+    console.log(`\x1b[32m[GroqAi]\x1b[0m ${text}`, ...args);
+  }
+
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }
@ -53,17 +57,111 @@ class GroqLLM {
    return !!modelName; // name just needs to exist
  }

+  /**
+   * Generates appropriate content array for a message + attachments.
+   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
+   * @returns {string|object[]}
+   */
+  #generateContent({ userPrompt, attachments = [] }) {
+    if (!attachments.length) return userPrompt;
+
+    const content = [{ type: "text", text: userPrompt }];
+    for (let attachment of attachments) {
+      content.push({
+        type: "image_url",
+        image_url: {
+          url: attachment.contentString,
+        },
+      });
+    }
+    return content.flat();
+  }
+
+  /**
+   * Last Updated: October 21, 2024
+   * According to https://console.groq.com/docs/vision
+   * the vision models supported all make a mess of prompting depending on the model.
+   * Currently the llama3.2 models are only in preview and subject to change and the llava model is deprecated - so we will not support attachments for that at all.
+   *
+   * Since we can only explicitly support the current models, this is a temporary solution.
+   * If the attachments are empty or the model is not a vision model, we will return the default prompt structure which will work for all models.
+   * If the attachments are present and the model is a vision model - we only return the user prompt with attachments - see comment at end of function for more.
+   */
+  #conditionalPromptStruct({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+    attachments = [], // This is the specific attachment for only this prompt
+  }) {
+    const VISION_MODELS = [
+      "llama-3.2-90b-vision-preview",
+      "llama-3.2-11b-vision-preview",
+    ];
+    const DEFAULT_PROMPT_STRUCT = [
+      {
+        role: "system",
+        content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+      },
+      ...chatHistory,
+      { role: "user", content: userPrompt },
+    ];
+
+    // If there are no attachments or model is not a vision model, return the default prompt structure
+    // as there is nothing to attach or do and no model limitations to consider
+    if (!attachments.length) return DEFAULT_PROMPT_STRUCT;
+    if (!VISION_MODELS.includes(this.model)) {
+      this.#log(
+        `${this.model} is not an explicitly supported vision model! Will omit attachments.`
+      );
+      return DEFAULT_PROMPT_STRUCT;
+    }
+
+    return [
+      // Why is the system prompt and history commented out?
+      // The current vision models for Groq perform VERY poorly with ANY history or text prior to the image.
+      // In order to not get LLM refusals for every single message, we will not include the "system prompt" or even the chat history.
+      // This is a temporary solution until Groq fixes their vision models to be more coherent and also handle context prior to the image.
+      // Note for the future:
+      // Groq vision models also do not support system prompts - which is why you see the user/assistant emulation used instead of "system".
+      // This means any vision call is assessed independently of the chat context prior to the image.
+      /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+      // {
+      //   role: "user",
+      //   content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+      // },
+      // {
+      //   role: "assistant",
+      //   content: "OK",
+      // },
+      // ...chatHistory,
+      {
+        role: "user",
+        content: this.#generateContent({ userPrompt, attachments }),
+      },
+    ];
+  }
+
+  /**
+   * Construct the user prompt for this model.
+   * @param {{attachments: import("../../helpers").Attachment[]}} param0
+   * @returns
+   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
+    attachments = [], // This is the specific attachment for only this prompt
  }) {
-    const prompt = {
-      role: "system",
-      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
-    };
-    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+    // NOTICE: SEE GroqLLM.#conditionalPromptStruct for more information on how attachments are handled with Groq.
+    return this.#conditionalPromptStruct({
+      systemPrompt,
+      contextTexts,
+      chatHistory,
+      userPrompt,
+      attachments,
+    });
  }

  async getChatCompletion(messages = null, { temperature = 0.7 }) {
@ -5,7 +5,7 @@ const {

 // hybrid of openAi LLM chat completion for LMStudio
 class LMStudioLLM {
-  constructor(embedder = null, _modelPreference = null) {
+  constructor(embedder = null, modelPreference = null) {
    if (!process.env.LMSTUDIO_BASE_PATH)
      throw new Error("No LMStudio API Base Path was set.");

@ -21,7 +21,10 @@ class LMStudioLLM {
    // and any other value will crash inferencing. So until this is patched we will
    // try to fetch the `/models` and have the user set it, or just fallback to "Loaded from Chat UI"
    // which will not impact users with <v0.2.17 and should work as well once the bug is fixed.
-    this.model = process.env.LMSTUDIO_MODEL_PREF || "Loaded from Chat UI";
+    this.model =
+      modelPreference ||
+      process.env.LMSTUDIO_MODEL_PREF ||
+      "Loaded from Chat UI";
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
@ -52,11 +52,18 @@ const MODEL_MAP = {
    "gpt-4-turbo-preview": 128_000,
    "gpt-4": 8_192,
    "gpt-4-32k": 32_000,
+    "o1-preview": 128_000,
+    "o1-preview-2024-09-12": 128_000,
+    "o1-mini": 128_000,
+    "o1-mini-2024-09-12": 128_000,
  },
  deepseek: {
    "deepseek-chat": 128_000,
    "deepseek-coder": 128_000,
  },
+  xai: {
+    "grok-beta": 131_072,
+  },
};

module.exports = { MODEL_MAP };
@ -23,6 +23,14 @@ class OpenAiLLM {
    this.defaultTemp = 0.7;
  }

+  /**
+   * Check if the model is an o1 model.
+   * @returns {boolean}
+   */
+  get isO1Model() {
+    return this.model.startsWith("o1");
+  }
+
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
@ -36,6 +44,7 @@ class OpenAiLLM {
  }

  streamingEnabled() {
+    if (this.isO1Model) return false;
    return "streamGetChatCompletion" in this;
  }

@ -98,8 +107,11 @@ class OpenAiLLM {
    userPrompt = "",
    attachments = [], // This is the specific attachment for only this prompt
  }) {
+    // o1 Models do not support the "system" role
+    // in order to combat this, we can use the "user" role as a replacement for now
+    // https://community.openai.com/t/o1-models-do-not-support-system-role-in-chat-completion/953880
    const prompt = {
-      role: "system",
+      role: this.isO1Model ? "user" : "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
@ -122,7 +134,7 @@ class OpenAiLLM {
      .create({
        model: this.model,
        messages,
-        temperature,
+        temperature: this.isO1Model ? 1 : temperature, // o1 models only accept temperature 1
      })
      .catch((e) => {
        throw new Error(e.message);
@ -143,7 +155,7 @@ class OpenAiLLM {
        model: this.model,
        stream: true,
        messages,
-        temperature,
+        temperature: this.isO1Model ? 1 : temperature, // o1 models only accept temperature 1
      });
    return streamRequest;
  }
|
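The o1 guards above interact in three places. A minimal sketch of the resulting behavior follows; the env var name and model id are assumptions for illustration, not taken from this diff:

// Sketch only: how the o1 guards in this hunk behave once applied.
const { OpenAiLLM } = require("./server/utils/AiProviders/openAi");

process.env.OPEN_AI_KEY = "sk-..."; // assumed key name for this provider
const llm = new OpenAiLLM(null, "o1-mini");

console.log(llm.isO1Model); // true - model id starts with "o1"
console.log(llm.streamingEnabled()); // false - o1 models cannot stream

const messages = llm.constructPrompt({
  systemPrompt: "You are a helpful assistant.",
  userPrompt: "Hello!",
});
console.log(messages[0].role); // "user" - the "system" role is swapped out for o1
// getChatCompletion()/streamGetChatCompletion() will also pin temperature to 1.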
server/utils/AiProviders/xai/index.js (new file, 168 lines)
@@ -0,0 +1,168 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+  handleDefaultStreamResponseV2,
+} = require("../../helpers/chat/responses");
+const { MODEL_MAP } = require("../modelMap");
+
+class XAiLLM {
+  constructor(embedder = null, modelPreference = null) {
+    if (!process.env.XAI_LLM_API_KEY)
+      throw new Error("No xAI API key was set.");
+    const { OpenAI: OpenAIApi } = require("openai");
+
+    this.openai = new OpenAIApi({
+      baseURL: "https://api.x.ai/v1",
+      apiKey: process.env.XAI_LLM_API_KEY,
+    });
+    this.model =
+      modelPreference || process.env.XAI_LLM_MODEL_PREF || "grok-beta";
+    this.limits = {
+      history: this.promptWindowLimit() * 0.15,
+      system: this.promptWindowLimit() * 0.15,
+      user: this.promptWindowLimit() * 0.7,
+    };
+
+    this.embedder = embedder ?? new NativeEmbedder();
+    this.defaultTemp = 0.7;
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
+  streamingEnabled() {
+    return "streamGetChatCompletion" in this;
+  }
+
+  static promptWindowLimit(modelName) {
+    return MODEL_MAP.xai[modelName] ?? 131_072;
+  }
+
+  promptWindowLimit() {
+    return MODEL_MAP.xai[this.model] ?? 131_072;
+  }
+
+  isValidChatCompletionModel(modelName = "") {
+    switch (modelName) {
+      case "grok-beta":
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  /**
+   * Generates appropriate content array for a message + attachments.
+   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
+   * @returns {string|object[]}
+   */
+  #generateContent({ userPrompt, attachments = [] }) {
+    if (!attachments.length) {
+      return userPrompt;
+    }
+
+    const content = [{ type: "text", text: userPrompt }];
+    for (let attachment of attachments) {
+      content.push({
+        type: "image_url",
+        image_url: {
+          url: attachment.contentString,
+          detail: "high",
+        },
+      });
+    }
+    return content.flat();
+  }
+
+  /**
+   * Construct the user prompt for this model.
+   * @param {{attachments: import("../../helpers").Attachment[]}} param0
+   * @returns
+   */
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+    attachments = [], // This is the specific attachment for only this prompt
+  }) {
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+    };
+    return [
+      prompt,
+      ...chatHistory,
+      {
+        role: "user",
+        content: this.#generateContent({ userPrompt, attachments }),
+      },
+    ];
+  }
+
+  async getChatCompletion(messages = null, { temperature = 0.7 }) {
+    if (!this.isValidChatCompletionModel(this.model))
+      throw new Error(
+        `xAI chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const result = await this.openai.chat.completions
+      .create({
+        model: this.model,
+        messages,
+        temperature,
+      })
+      .catch((e) => {
+        throw new Error(e.message);
+      });
+
+    if (!result.hasOwnProperty("choices") || result.choices.length === 0)
+      return null;
+    return result.choices[0].message.content;
+  }
+
+  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+    if (!this.isValidChatCompletionModel(this.model))
+      throw new Error(
+        `xAI chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const streamRequest = await this.openai.chat.completions.create({
+      model: this.model,
+      stream: true,
+      messages,
+      temperature,
+    });
+    return streamRequest;
+  }
+
+  handleStream(response, stream, responseProps) {
+    return handleDefaultStreamResponseV2(response, stream, responseProps);
+  }
+
+  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
+module.exports = {
+  XAiLLM,
+};
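A minimal usage sketch for the new `XAiLLM` class; the key value is a placeholder, and `getChatCompletion` expects the message array produced by `constructPrompt`:

process.env.XAI_LLM_API_KEY = "xai-..."; // placeholder
const { XAiLLM } = require("./server/utils/AiProviders/xai");

const llm = new XAiLLM(null, "grok-beta"); // null embedder -> NativeEmbedder
const messages = llm.constructPrompt({
  systemPrompt: "You are a concise assistant.",
  contextTexts: [],
  chatHistory: [],
  userPrompt: "What is AnythingLLM?",
});
llm.getChatCompletion(messages, { temperature: 0.7 }).then(console.log);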
@@ -11,7 +11,7 @@ class VoyageAiEmbedder {
     });

     this.voyage = voyage;
-    this.model = process.env.EMBEDDING_MODEL_PREF || "voyage-large-2-instruct";
+    this.model = process.env.EMBEDDING_MODEL_PREF || "voyage-3-lite";

     // Limit of how many strings we can process in a single pass to stay with resource or network limits
     this.batchSize = 128; // Voyage AI's limit per request is 128 https://docs.voyageai.com/docs/rate-limits#use-larger-batches
@@ -23,6 +23,8 @@ class VoyageAiEmbedder {
     switch (this.model) {
       case "voyage-finance-2":
       case "voyage-multilingual-2":
+      case "voyage-3":
+      case "voyage-3-lite":
         return 32_000;
       case "voyage-large-2-instruct":
       case "voyage-law-2":
@@ -7,6 +7,9 @@ function getTTSProvider() {
     case "elevenlabs":
       const { ElevenLabsTTS } = require("./elevenLabs");
       return new ElevenLabsTTS();
+    case "generic-openai":
+      const { GenericOpenAiTTS } = require("./openAiGeneric");
+      return new GenericOpenAiTTS();
     default:
       throw new Error("ENV: No TTS_PROVIDER value found in environment!");
   }
server/utils/TextToSpeech/openAiGeneric/index.js (new file, 50 lines)
@@ -0,0 +1,50 @@
+class GenericOpenAiTTS {
+  constructor() {
+    if (!process.env.TTS_OPEN_AI_COMPATIBLE_KEY)
+      this.#log(
+        "No OpenAI compatible API key was set. You might need to set this to use your OpenAI compatible TTS service."
+      );
+    if (!process.env.TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL)
+      this.#log(
+        "No OpenAI compatible voice model was set. We will use the default voice model 'alloy'. This may not exist for your selected endpoint."
+      );
+    if (!process.env.TTS_OPEN_AI_COMPATIBLE_ENDPOINT)
+      throw new Error(
+        "No OpenAI compatible endpoint was set. Please set this to use your OpenAI compatible TTS service."
+      );
+
+    const { OpenAI: OpenAIApi } = require("openai");
+    this.openai = new OpenAIApi({
+      apiKey: process.env.TTS_OPEN_AI_COMPATIBLE_KEY || null,
+      baseURL: process.env.TTS_OPEN_AI_COMPATIBLE_ENDPOINT,
+    });
+    this.voice = process.env.TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL ?? "alloy";
+  }
+
+  #log(text, ...args) {
+    console.log(`\x1b[32m[OpenAiGenericTTS]\x1b[0m ${text}`, ...args);
+  }
+
+  /**
+   * Generates a buffer from the given text input using the OpenAI compatible TTS service.
+   * @param {string} textInput - The text to be converted to audio.
+   * @returns {Promise<Buffer>} A buffer containing the audio data.
+   */
+  async ttsBuffer(textInput) {
+    try {
+      const result = await this.openai.audio.speech.create({
+        model: "tts-1",
+        voice: this.voice,
+        input: textInput,
+      });
+      return Buffer.from(await result.arrayBuffer());
+    } catch (e) {
+      console.error(e);
+    }
+    return null;
+  }
+}
+
+module.exports = {
+  GenericOpenAiTTS,
+};
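A usage sketch for the new generic TTS class; the endpoint URL is a placeholder for any server exposing an OpenAI-compatible `/audio/speech` route:

process.env.TTS_OPEN_AI_COMPATIBLE_ENDPOINT = "http://localhost:8020/v1"; // placeholder
process.env.TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL = "alloy";

const {
  GenericOpenAiTTS,
} = require("./server/utils/TextToSpeech/openAiGeneric");
const tts = new GenericOpenAiTTS();
tts.ttsBuffer("Hello from AnythingLLM!").then((buffer) => {
  // ttsBuffer resolves null on failure, so guard before writing the file.
  if (buffer) require("fs").writeFileSync("speech.mp3", buffer);
});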
@@ -756,7 +756,7 @@ ${this.getHistory({ to: route.to })
       case "anthropic":
         return new Providers.AnthropicProvider({ model: config.model });
       case "lmstudio":
-        return new Providers.LMStudioProvider({});
+        return new Providers.LMStudioProvider({ model: config.model });
       case "ollama":
         return new Providers.OllamaProvider({ model: config.model });
       case "groq":
@@ -785,6 +785,12 @@ ${this.getHistory({ to: route.to })
         return new Providers.FireworksAIProvider({ model: config.model });
       case "deepseek":
         return new Providers.DeepSeekProvider({ model: config.model });
+      case "litellm":
+        return new Providers.LiteLLMProvider({ model: config.model });
+      case "apipie":
+        return new Providers.ApiPieProvider({ model: config.model });
+      case "xai":
+        return new Providers.XAIProvider({ model: config.model });

       default:
         throw new Error(
@@ -77,6 +77,9 @@ const webBrowsing = {
           case "searxng-engine":
             engine = "_searXNGEngine";
             break;
+          case "tavily-search":
+            engine = "_tavilySearch";
+            break;
           default:
             engine = "_googleSearchEngine";
         }
@@ -436,6 +439,59 @@ const webBrowsing = {
         });
       });

+    if (data.length === 0)
+      return `No information was found online for the search query.`;
+    this.super.introspect(
+      `${this.caller}: I found ${data.length} results - looking over them now.`
+    );
+    return JSON.stringify(data);
+  },
+  _tavilySearch: async function (query) {
+    if (!process.env.AGENT_TAVILY_API_KEY) {
+      this.super.introspect(
+        `${this.caller}: I can't use Tavily searching because the user has not defined the required API key.\nVisit: https://tavily.com/ to create the API key.`
+      );
+      return `Search is disabled and no content was found. This functionality is disabled because the user has not set it up yet.`;
+    }
+
+    this.super.introspect(
+      `${this.caller}: Using Tavily to search for "${
+        query.length > 100 ? `${query.slice(0, 100)}...` : query
+      }"`
+    );
+
+    const url = "https://api.tavily.com/search";
+    const { response, error } = await fetch(url, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+      },
+      body: JSON.stringify({
+        api_key: process.env.AGENT_TAVILY_API_KEY,
+        query: query,
+      }),
+    })
+      .then((res) => res.json())
+      .then((data) => {
+        return { response: data, error: null };
+      })
+      .catch((e) => {
+        return { response: null, error: e.message };
+      });
+
+    if (error) return `There was an error searching for content. ${error}`;
+
+    const data = [];
+    response.results?.forEach((searchResult) => {
+      const { title, url, content } = searchResult;
+      data.push({
+        title,
+        link: url,
+        snippet: content,
+      });
+    });
+
     if (data.length === 0)
       return `No information was found online for the search query.`;
     this.super.introspect(
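The `_tavilySearch` engine above reduces to one POST against the Tavily API; a standalone sketch of the same request/response mapping:

// Standalone sketch of the Tavily call made by the new engine.
// Response fields used above: results[].title / .url / .content.
async function tavilySearch(query) {
  const res = await fetch("https://api.tavily.com/search", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ api_key: process.env.AGENT_TAVILY_API_KEY, query }),
  });
  const data = await res.json();
  return (data.results ?? []).map(({ title, url, content }) => ({
    title,
    link: url,
    snippet: content, // same shape the other search engines return
  }));
}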
@@ -130,6 +130,30 @@ class Provider {
           apiKey: process.env.FIREWORKS_AI_LLM_API_KEY,
           ...config,
         });
+      case "apipie":
+        return new ChatOpenAI({
+          configuration: {
+            baseURL: "https://apipie.ai/v1",
+          },
+          apiKey: process.env.APIPIE_LLM_API_KEY ?? null,
+          ...config,
+        });
+      case "deepseek":
+        return new ChatOpenAI({
+          configuration: {
+            baseURL: "https://api.deepseek.com/v1",
+          },
+          apiKey: process.env.DEEPSEEK_API_KEY ?? null,
+          ...config,
+        });
+      case "xai":
+        return new ChatOpenAI({
+          configuration: {
+            baseURL: "https://api.x.ai/v1",
+          },
+          apiKey: process.env.XAI_LLM_API_KEY ?? null,
+          ...config,
+        });

       // OSS Model Runners
       // case "anythingllm_ollama":
@@ -174,14 +198,15 @@ class Provider {
           apiKey: process.env.TEXT_GEN_WEB_UI_API_KEY ?? "not-used",
           ...config,
         });
-      case "deepseek":
+      case "litellm":
         return new ChatOpenAI({
           configuration: {
-            baseURL: "https://api.deepseek.com/v1",
+            baseURL: process.env.LITE_LLM_BASE_PATH,
           },
-          apiKey: process.env.DEEPSEEK_API_KEY ?? null,
+          apiKey: process.env.LITE_LLM_API_KEY ?? null,
           ...config,
         });

       default:
         throw new Error(`Unsupported provider ${provider} for this task.`);
     }
server/utils/agents/aibitat/providers/apipie.js (new file, 116 lines)
@@ -0,0 +1,116 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+/**
+ * The agent provider for the ApiPie provider.
+ */
+class ApiPieProvider extends InheritMultiple([Provider, UnTooled]) {
+  model;
+
+  constructor(config = {}) {
+    const { model = "openrouter/llama-3.1-8b-instruct" } = config;
+    super();
+    const client = new OpenAI({
+      baseURL: "https://apipie.ai/v1",
+      apiKey: process.env.APIPIE_LLM_API_KEY,
+      maxRetries: 3,
+    });
+
+    this._client = client;
+    this.model = model;
+    this.verbose = true;
+  }
+
+  get client() {
+    return this._client;
+  }
+
+  async #handleFunctionCallChat({ messages = [] }) {
+    return await this.client.chat.completions
+      .create({
+        model: this.model,
+        temperature: 0,
+        messages,
+      })
+      .then((result) => {
+        if (!result.hasOwnProperty("choices"))
+          throw new Error("ApiPie chat: No results!");
+        if (result.choices.length === 0)
+          throw new Error("ApiPie chat: No results length!");
+        return result.choices[0].message.content;
+      })
+      .catch((_) => {
+        return null;
+      });
+  }
+
+  /**
+   * Create a completion based on the received messages.
+   *
+   * @param messages A list of messages to send to the API.
+   * @param functions
+   * @returns The completion.
+   */
+  async complete(messages, functions = null) {
+    try {
+      let completion;
+      if (functions.length > 0) {
+        const { toolCall, text } = await this.functionCall(
+          messages,
+          functions,
+          this.#handleFunctionCallChat.bind(this)
+        );
+
+        if (toolCall !== null) {
+          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+          return {
+            result: null,
+            functionCall: {
+              name: toolCall.name,
+              arguments: toolCall.arguments,
+            },
+            cost: 0,
+          };
+        }
+        completion = { content: text };
+      }
+
+      if (!completion?.content) {
+        this.providerLog(
+          "Will assume chat completion without tool call inputs."
+        );
+        const response = await this.client.chat.completions.create({
+          model: this.model,
+          messages: this.cleanMsgs(messages),
+        });
+        completion = response.choices[0].message;
+      }
+
+      // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
+      // from calling the exact same function over and over in a loop within a single chat exchange
+      // _but_ we should enable it to call previously used tools in a new chat interaction.
+      this.deduplicator.reset("runs");
+      return {
+        result: completion.content,
+        cost: 0,
+      };
+    } catch (error) {
+      throw error;
+    }
+  }
+
+  /**
+   * Get the cost of the completion.
+   *
+   * @param _usage The completion to get the cost for.
+   * @returns The cost of the completion.
+   */
+  getCost(_usage) {
+    return 0;
+  }
+}
+
+module.exports = ApiPieProvider;
@@ -33,7 +33,10 @@ ${JSON.stringify(def.parameters.properties, null, 4)}\n`;

     if (Array.isArray(def.examples)) {
       def.examples.forEach(({ prompt, call }) => {
-        shotExample += `Query: "${prompt}"\nJSON: ${call}\n`;
+        shotExample += `Query: "${prompt}"\nJSON: ${JSON.stringify({
+          name: def.name,
+          arguments: safeJsonParse(call, {}),
+        })}\n`;
       });
     }
     output += `${shotExample}-----------\n`;
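The change normalizes few-shot examples so each shot matches the `{name, arguments}` JSON the model is asked to emit. With a hypothetical tool definition, the shot line changes like this:

// Hypothetical definition to illustrate the new formatting:
const def = {
  name: "web-browsing",
  examples: [{ prompt: "Who won the game?", call: '{"query":"game winner"}' }],
};
// Before: Query: "Who won the game?"\nJSON: {"query":"game winner"}
// After:  Query: "Who won the game?"\nJSON: {"name":"web-browsing","arguments":{"query":"game winner"}}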
@@ -15,6 +15,9 @@ const TextWebGenUiProvider = require("./textgenwebui.js");
 const AWSBedrockProvider = require("./bedrock.js");
 const FireworksAIProvider = require("./fireworksai.js");
 const DeepSeekProvider = require("./deepseek.js");
+const LiteLLMProvider = require("./litellm.js");
+const ApiPieProvider = require("./apipie.js");
+const XAIProvider = require("./xai.js");

 module.exports = {
   OpenAIProvider,
@@ -34,4 +37,7 @@ module.exports = {
   TextWebGenUiProvider,
   AWSBedrockProvider,
   FireworksAIProvider,
+  LiteLLMProvider,
+  ApiPieProvider,
+  XAIProvider,
 };
server/utils/agents/aibitat/providers/litellm.js (new file, 110 lines)
@@ -0,0 +1,110 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+/**
+ * The agent provider for LiteLLM.
+ */
+class LiteLLMProvider extends InheritMultiple([Provider, UnTooled]) {
+  model;
+
+  constructor(config = {}) {
+    super();
+    const { model = null } = config;
+    const client = new OpenAI({
+      baseURL: process.env.LITE_LLM_BASE_PATH,
+      apiKey: process.env.LITE_LLM_API_KEY ?? null,
+      maxRetries: 3,
+    });
+
+    this._client = client;
+    this.model = model || process.env.LITE_LLM_MODEL_PREF;
+    this.verbose = true;
+  }
+
+  get client() {
+    return this._client;
+  }
+
+  async #handleFunctionCallChat({ messages = [] }) {
+    return await this.client.chat.completions
+      .create({
+        model: this.model,
+        temperature: 0,
+        messages,
+      })
+      .then((result) => {
+        if (!result.hasOwnProperty("choices"))
+          throw new Error("LiteLLM chat: No results!");
+        if (result.choices.length === 0)
+          throw new Error("LiteLLM chat: No results length!");
+        return result.choices[0].message.content;
+      })
+      .catch((_) => {
+        return null;
+      });
+  }
+
+  /**
+   * Create a completion based on the received messages.
+   *
+   * @param messages A list of messages to send to the API.
+   * @param functions
+   * @returns The completion.
+   */
+  async complete(messages, functions = null) {
+    try {
+      let completion;
+      if (functions.length > 0) {
+        const { toolCall, text } = await this.functionCall(
+          messages,
+          functions,
+          this.#handleFunctionCallChat.bind(this)
+        );
+
+        if (toolCall !== null) {
+          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+          return {
+            result: null,
+            functionCall: {
+              name: toolCall.name,
+              arguments: toolCall.arguments,
+            },
+            cost: 0,
+          };
+        }
+        completion = { content: text };
+      }
+
+      if (!completion?.content) {
+        this.providerLog(
+          "Will assume chat completion without tool call inputs."
+        );
+        const response = await this.client.chat.completions.create({
+          model: this.model,
+          messages: this.cleanMsgs(messages),
+        });
+        completion = response.choices[0].message;
+      }
+
+      // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
+      // from calling the exact same function over and over in a loop within a single chat exchange
+      // _but_ we should enable it to call previously used tools in a new chat interaction.
+      this.deduplicator.reset("runs");
+      return {
+        result: completion.content,
+        cost: 0,
+      };
+    } catch (error) {
+      throw error;
+    }
+  }
+
+  getCost(_usage) {
+    return 0;
+  }
+}
+
+module.exports = LiteLLMProvider;
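A usage sketch for the new LiteLLM agent provider; the proxy URL and model name are assumed deployment values:

process.env.LITE_LLM_BASE_PATH = "http://127.0.0.1:4000"; // assumed proxy URL
process.env.LITE_LLM_MODEL_PREF = "gpt-4o-mini"; // assumed deployment name

const LiteLLMProvider = require("./server/utils/agents/aibitat/providers/litellm.js");
const provider = new LiteLLMProvider();
provider
  .complete([{ role: "user", content: "ping" }], []) // no tools -> plain chat path
  .then(({ result }) => console.log(result));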
@@ -9,9 +9,14 @@ const UnTooled = require("./helpers/untooled.js");
 class LMStudioProvider extends InheritMultiple([Provider, UnTooled]) {
   model;

-  constructor(_config = {}) {
+  /**
+   *
+   * @param {{model?: string}} config
+   */
+  constructor(config = {}) {
     super();
-    const model = process.env.LMSTUDIO_MODEL_PREF || "Loaded from Chat UI";
+    const model =
+      config?.model || process.env.LMSTUDIO_MODEL_PREF || "Loaded from Chat UI";
     const client = new OpenAI({
       baseURL: process.env.LMSTUDIO_BASE_PATH?.replace(/\/+$/, ""), // here is the URL to your LMStudio instance
       apiKey: null,
server/utils/agents/aibitat/providers/xai.js (new file, 116 lines)
@@ -0,0 +1,116 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+/**
+ * The agent provider for the xAI provider.
+ */
+class XAIProvider extends InheritMultiple([Provider, UnTooled]) {
+  model;
+
+  constructor(config = {}) {
+    const { model = "grok-beta" } = config;
+    super();
+    const client = new OpenAI({
+      baseURL: "https://api.x.ai/v1",
+      apiKey: process.env.XAI_LLM_API_KEY,
+      maxRetries: 3,
+    });
+
+    this._client = client;
+    this.model = model;
+    this.verbose = true;
+  }
+
+  get client() {
+    return this._client;
+  }
+
+  async #handleFunctionCallChat({ messages = [] }) {
+    return await this.client.chat.completions
+      .create({
+        model: this.model,
+        temperature: 0,
+        messages,
+      })
+      .then((result) => {
+        if (!result.hasOwnProperty("choices"))
+          throw new Error("xAI chat: No results!");
+        if (result.choices.length === 0)
+          throw new Error("xAI chat: No results length!");
+        return result.choices[0].message.content;
+      })
+      .catch((_) => {
+        return null;
+      });
+  }
+
+  /**
+   * Create a completion based on the received messages.
+   *
+   * @param messages A list of messages to send to the API.
+   * @param functions
+   * @returns The completion.
+   */
+  async complete(messages, functions = null) {
+    try {
+      let completion;
+      if (functions.length > 0) {
+        const { toolCall, text } = await this.functionCall(
+          messages,
+          functions,
+          this.#handleFunctionCallChat.bind(this)
+        );
+
+        if (toolCall !== null) {
+          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+          return {
+            result: null,
+            functionCall: {
+              name: toolCall.name,
+              arguments: toolCall.arguments,
+            },
+            cost: 0,
+          };
+        }
+        completion = { content: text };
+      }
+
+      if (!completion?.content) {
+        this.providerLog(
+          "Will assume chat completion without tool call inputs."
+        );
+        const response = await this.client.chat.completions.create({
+          model: this.model,
+          messages: this.cleanMsgs(messages),
+        });
+        completion = response.choices[0].message;
+      }
+
+      // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
+      // from calling the exact same function over and over in a loop within a single chat exchange
+      // _but_ we should enable it to call previously used tools in a new chat interaction.
+      this.deduplicator.reset("runs");
+      return {
+        result: completion.content,
+        cost: 0,
+      };
+    } catch (error) {
+      throw error;
+    }
+  }
+
+  /**
+   * Get the cost of the completion.
+   *
+   * @param _usage The completion to get the cost for.
+   * @returns The cost of the completion.
+   */
+  getCost(_usage) {
+    return 0;
+  }
+}
+
+module.exports = XAIProvider;
@@ -99,30 +99,69 @@ class EphemeralAgentHandler extends AgentHandler {
     }
   }

+  /**
+   * Attempts to find a fallback provider and model to use if the workspace
+   * does not have an explicit `agentProvider` and `agentModel` set.
+   * 1. Fallback to the workspace `chatProvider` and `chatModel` if they exist.
+   * 2. Fallback to the system `LLM_PROVIDER` and try to load the associated default model via ENV params or a base available model.
+   * 3. Otherwise, return null - will likely throw an error the user can act on.
+   * @returns {object|null} - An object with provider and model keys.
+   */
+  #getFallbackProvider() {
+    // First, fallback to the workspace chat provider and model if they exist
+    if (this.#workspace.chatProvider && this.#workspace.chatModel) {
+      return {
+        provider: this.#workspace.chatProvider,
+        model: this.#workspace.chatModel,
+      };
+    }
+
+    // If workspace does not have chat provider and model fallback
+    // to system provider and try to load provider default model
+    const systemProvider = process.env.LLM_PROVIDER;
+    const systemModel = this.providerDefault(systemProvider);
+    if (systemProvider && systemModel) {
+      return {
+        provider: systemProvider,
+        model: systemModel,
+      };
+    }
+
+    return null;
+  }
+
   /**
    * Finds or assumes the model preference value to use for API calls.
    * If multi-model loading is supported, we use their agent model selection of the workspace
    * If not supported, we attempt to fallback to the system provider value for the LLM preference
    * and if that fails - we assume a reasonable base model to exist.
-   * @returns {string} the model preference value to use in API calls
+   * @returns {string|null} the model preference value to use in API calls
    */
   #fetchModel() {
-    if (!Object.keys(this.noProviderModelDefault).includes(this.provider))
-      return this.#workspace.agentModel || this.providerDefault();
+    // Provider was not explicitly set for workspace, so we are going to run our fallback logic
+    // that will set a provider and model for us to use.
+    if (!this.provider) {
+      const fallback = this.#getFallbackProvider();
+      if (!fallback) throw new Error("No valid provider found for the agent.");
+      this.provider = fallback.provider; // re-set the provider to the fallback provider so it is not null.
+      return fallback.model; // set its defined model based on fallback logic.
+    }

-    // Provider has no reliable default (cant load many models) - so we need to look at system
-    // for the model param.
-    const sysModelKey = this.noProviderModelDefault[this.provider];
-    if (!!sysModelKey)
-      return process.env[sysModelKey] ?? this.providerDefault();
+    // The provider was explicitly set, so check if the workspace has an agent model set.
+    if (this.invocation.workspace.agentModel)
+      return this.invocation.workspace.agentModel;

-    // If all else fails - look at the provider default list
+    // Otherwise, we have no model to use - so guess a default model to use via the provider
+    // and its system ENV params and if that fails - we return either a base model or null.
     return this.providerDefault();
   }

   #providerSetupAndCheck() {
-    this.provider = this.#workspace.agentProvider;
+    this.provider = this.#workspace.agentProvider ?? null;
     this.model = this.#fetchModel();
+
+    if (!this.provider)
+      throw new Error("No valid provider found for the agent.");
     this.log(`Start ${this.#invocationUUID}::${this.provider}:${this.model}`);
     this.checkSetup();
   }
@@ -11,13 +11,6 @@ const ImportedPlugin = require("./imported");
 class AgentHandler {
   #invocationUUID;
   #funcsToLoad = [];
-  noProviderModelDefault = {
-    azure: "OPEN_MODEL_PREF",
-    lmstudio: "LMSTUDIO_MODEL_PREF",
-    textgenwebui: null, // does not even use `model` in API req
-    "generic-openai": "GENERIC_OPEN_AI_MODEL_PREF",
-    bedrock: "AWS_BEDROCK_LLM_MODEL_PREFERENCE",
-  };
   invocation = null;
   aibitat = null;
   channel = null;
@@ -166,6 +159,20 @@ class AgentHandler {
         if (!process.env.DEEPSEEK_API_KEY)
           throw new Error("DeepSeek API Key must be provided to use agents.");
         break;
+      case "litellm":
+        if (!process.env.LITE_LLM_BASE_PATH)
+          throw new Error(
+            "LiteLLM API base path and key must be provided to use agents."
+          );
+        break;
+      case "apipie":
+        if (!process.env.APIPIE_LLM_API_KEY)
+          throw new Error("ApiPie API Key must be provided to use agents.");
+        break;
+      case "xai":
+        if (!process.env.XAI_LLM_API_KEY)
+          throw new Error("xAI API Key must be provided to use agents.");
+        break;

       default:
         throw new Error(
@@ -174,49 +181,72 @@ class AgentHandler {
     }
   }

+  /**
+   * Finds the default model for a given provider. If no default model is set for its associated ENV then
+   * it will return a reasonable base model for the provider if one exists.
+   * @param {string} provider - The provider to find the default model for.
+   * @returns {string|null} The default model for the provider.
+   */
   providerDefault(provider = this.provider) {
     switch (provider) {
       case "openai":
-        return "gpt-4o";
+        return process.env.OPEN_MODEL_PREF ?? "gpt-4o";
       case "anthropic":
-        return "claude-3-sonnet-20240229";
+        return process.env.ANTHROPIC_MODEL_PREF ?? "claude-3-sonnet-20240229";
       case "lmstudio":
-        return "server-default";
+        return process.env.LMSTUDIO_MODEL_PREF ?? "server-default";
       case "ollama":
-        return "llama3:latest";
+        return process.env.OLLAMA_MODEL_PREF ?? "llama3:latest";
       case "groq":
-        return "llama3-70b-8192";
+        return process.env.GROQ_MODEL_PREF ?? "llama3-70b-8192";
       case "togetherai":
-        return "mistralai/Mixtral-8x7B-Instruct-v0.1";
+        return (
+          process.env.TOGETHER_AI_MODEL_PREF ??
+          "mistralai/Mixtral-8x7B-Instruct-v0.1"
+        );
       case "azure":
-        return "gpt-3.5-turbo";
+        return null;
       case "koboldcpp":
-        return null;
+        return process.env.KOBOLD_CPP_MODEL_PREF ?? null;
       case "gemini":
-        return "gemini-pro";
+        return process.env.GEMINI_MODEL_PREF ?? "gemini-pro";
       case "localai":
-        return null;
+        return process.env.LOCAL_AI_MODEL_PREF ?? null;
      case "openrouter":
-        return "openrouter/auto";
+        return process.env.OPENROUTER_MODEL_PREF ?? "openrouter/auto";
       case "mistral":
-        return "mistral-medium";
+        return process.env.MISTRAL_MODEL_PREF ?? "mistral-medium";
       case "generic-openai":
-        return null;
+        return process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
       case "perplexity":
-        return "sonar-small-online";
+        return process.env.PERPLEXITY_MODEL_PREF ?? "sonar-small-online";
       case "textgenwebui":
         return null;
       case "bedrock":
-        return null;
+        return process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE ?? null;
       case "fireworksai":
-        return null;
+        return process.env.FIREWORKS_AI_LLM_MODEL_PREF ?? null;
       case "deepseek":
-        return "deepseek-chat";
+        return process.env.DEEPSEEK_MODEL_PREF ?? "deepseek-chat";
+      case "litellm":
+        return process.env.LITE_LLM_MODEL_PREF ?? null;
+      case "apipie":
+        return process.env.APIPIE_LLM_MODEL_PREF ?? null;
+      case "xai":
+        return process.env.XAI_LLM_MODEL_PREF ?? "grok-beta";
       default:
-        return "unknown";
+        return null;
     }
   }

+  /**
+   * Attempts to find a fallback provider and model to use if the workspace
+   * does not have an explicit `agentProvider` and `agentModel` set.
+   * 1. Fallback to the workspace `chatProvider` and `chatModel` if they exist.
+   * 2. Fallback to the system `LLM_PROVIDER` and try to load the associated default model via ENV params or a base available model.
+   * 3. Otherwise, return null - will likely throw an error the user can act on.
+   * @returns {object|null} - An object with provider and model keys.
+   */
   #getFallbackProvider() {
     // First, fallback to the workspace chat provider and model if they exist
     if (
@@ -248,7 +278,7 @@ class AgentHandler {
    * If multi-model loading is supported, we use their agent model selection of the workspace
    * If not supported, we attempt to fallback to the system provider value for the LLM preference
    * and if that fails - we assume a reasonable base model to exist.
-   * @returns {string} the model preference value to use in API calls
+   * @returns {string|null} the model preference value to use in API calls
    */
   #fetchModel() {
     // Provider was not explicitly set for workspace, so we are going to run our fallback logic
@@ -261,21 +291,11 @@ class AgentHandler {
     }

     // The provider was explicitly set, so check if the workspace has an agent model set.
-    if (this.invocation.workspace.agentModel) {
+    if (this.invocation.workspace.agentModel)
       return this.invocation.workspace.agentModel;
-    }

-    // If the provider we are using is not supported or does not support multi-model loading
-    // then we use the default model for the provider.
-    if (!Object.keys(this.noProviderModelDefault).includes(this.provider)) {
-      return this.providerDefault();
-    }
-
-    // Load the model from the system environment variable for providers with no multi-model loading.
-    const sysModelKey = this.noProviderModelDefault[this.provider];
-    if (sysModelKey) return process.env[sysModelKey] ?? this.providerDefault();
-
-    // Otherwise, we have no model to use - so guess a default model to use.
+    // Otherwise, we have no model to use - so guess a default model to use via the provider
+    // and its system ENV params and if that fails - we return either a base model or null.
     return this.providerDefault();
   }

@@ -285,7 +305,6 @@ class AgentHandler {

     if (!this.provider)
       throw new Error("No valid provider found for the agent.");
-
     this.log(`Start ${this.#invocationUUID}::${this.provider}:${this.model}`);
     this.checkSetup();
   }
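Taken together, the agent refactor above implements one resolution order. A standalone mirror of that logic, as a sketch rather than the class itself:

// Sketch of the fallback order, mirrored outside the class:
function resolveAgentConfig(workspace, providerDefault) {
  // 1. Explicit workspace agent settings win.
  if (workspace.agentProvider)
    return {
      provider: workspace.agentProvider,
      model: workspace.agentModel ?? providerDefault(workspace.agentProvider),
    };
  // 2. Fall back to the workspace chat provider/model pair.
  if (workspace.chatProvider && workspace.chatModel)
    return { provider: workspace.chatProvider, model: workspace.chatModel };
  // 3. Fall back to the system provider and its *_MODEL_PREF env default.
  const provider = process.env.LLM_PROVIDER;
  const model = providerDefault(provider);
  if (provider && model) return { provider, model };
  return null; // caller throws "No valid provider found for the agent."
}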
@@ -60,8 +60,7 @@ async function streamChatWithForEmbed(
   const { rawHistory, chatHistory } = await recentEmbedChatHistory(
     sessionId,
     embed,
-    messageLimit,
-    chatMode
+    messageLimit
   );

   // See stream.js comment for more information on this implementation.
@@ -113,16 +112,27 @@ async function streamChatWithForEmbed(
       return;
     }

-    contextTexts = [...contextTexts, ...vectorSearchResults.contextTexts];
+    const { fillSourceWindow } = require("../helpers/chat");
+    const filledSources = fillSourceWindow({
+      nDocs: embed.workspace?.topN || 4,
+      searchResults: vectorSearchResults.sources,
+      history: rawHistory,
+      filterIdentifiers: pinnedDocIdentifiers,
+    });
+
+    // Why does contextTexts get all the info, but sources only get current search?
+    // This is to give the ability of the LLM to "comprehend" a contextual response without
+    // populating the Citations under a response with documents the user "thinks" are irrelevant
+    // due to how we manage backfilling of the context to keep chats with the LLM more correct in responses.
+    // If a past citation was used to answer the question - that is visible in the history so it logically makes sense
+    // and does not appear to the user that a new response used information that is otherwise irrelevant for a given prompt.
+    // TLDR; reduces GitHub issues for "LLM citing document that has no answer in it" while keeping answers highly accurate.
+    contextTexts = [...contextTexts, ...filledSources.contextTexts];
     sources = [...sources, ...vectorSearchResults.sources];

-    // If in query mode and no sources are found, do not
+    // If in query mode and no sources are found in current search or backfilled from history, do not
     // let the LLM try to hallucinate a response or use general knowledge
-    if (
-      chatMode === "query" &&
-      sources.length === 0 &&
-      pinnedDocIdentifiers.length === 0
-    ) {
+    if (chatMode === "query" && contextTexts.length === 0) {
       writeResponseChunk(response, {
         id: uuid,
         type: "textResponse",
@@ -178,7 +188,7 @@ async function streamChatWithForEmbed(
   await EmbedChats.new({
     embedId: embed.id,
     prompt: message,
-    response: { text: completeText, type: chatMode },
+    response: { text: completeText, type: chatMode, sources },
     connection_information: response.locals.connection
       ? {
           ...response.locals.connection,
@@ -190,15 +200,13 @@ async function streamChatWithForEmbed(
   return;
 }

-// On query we don't return message history. All other chat modes and when chatting
-// with no embeddings we return history.
-async function recentEmbedChatHistory(
-  sessionId,
-  embed,
-  messageLimit = 20,
-  chatMode = null
-) {
-  if (chatMode === "query") return { rawHistory: [], chatHistory: [] };
+/**
+ * @param {string} sessionId the session id of the user from embed widget
+ * @param {Object} embed the embed config object
+ * @param {Number} messageLimit the number of messages to return
+ * @returns {Promise<{rawHistory: import("@prisma/client").embed_chats[], chatHistory: {role: string, content: string}[]}>}
+ */
+async function recentEmbedChatHistory(sessionId, embed, messageLimit = 20) {
   const rawHistory = (
     await EmbedChats.forEmbedByUser(embed.id, sessionId, messageLimit, {
       id: "desc",
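For reference, the call contract of the backfill helper as used above; the return shape beyond `contextTexts` is not shown in this diff, so treat this as a sketch:

const { fillSourceWindow } = require("./server/utils/helpers/chat");

const filled = fillSourceWindow({
  nDocs: 4, // target number of documents to keep in the context window
  searchResults: [], // hits from the current vector search
  history: [], // prior embed chats whose cited sources may backfill context
  filterIdentifiers: [], // pinned document identifiers are never re-added
});
console.log(filled.contextTexts); // what actually gets prepended for the LLM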
@@ -1,4 +1,5 @@
 const { fetchOpenRouterModels } = require("../AiProviders/openRouter");
+const { fetchApiPieModels } = require("../AiProviders/apipie");
 const { perplexityModels } = require("../AiProviders/perplexity");
 const { togetherAiModels } = require("../AiProviders/togetherAi");
 const { fireworksAiModels } = require("../AiProviders/fireworksAi");
@@ -19,6 +20,8 @@ const SUPPORT_CUSTOM_MODELS = [
   "elevenlabs-tts",
   "groq",
   "deepseek",
+  "apipie",
+  "xai",
 ];

 async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -56,6 +59,10 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
       return await getGroqAiModels(apiKey);
     case "deepseek":
       return await getDeepSeekModels(apiKey);
+    case "apipie":
+      return await getAPIPieModels(apiKey);
+    case "xai":
+      return await getXAIModels(apiKey);
     default:
       return { models: [], error: "Invalid provider for custom models" };
   }
@@ -124,7 +131,7 @@ async function openAiModels(apiKey = null) {
   });

   const gpts = allModels
-    .filter((model) => model.id.startsWith("gpt"))
+    .filter((model) => model.id.startsWith("gpt") || model.id.startsWith("o1"))
     .filter(
       (model) => !model.id.includes("vision") && !model.id.includes("instruct")
     )
@@ -355,6 +362,21 @@ async function getOpenRouterModels() {
   return { models, error: null };
 }

+async function getAPIPieModels(apiKey = null) {
+  const knownModels = await fetchApiPieModels(apiKey);
+  if (Object.keys(knownModels).length === 0)
+    return { models: [], error: null };
+
+  const models = Object.values(knownModels).map((model) => {
+    return {
+      id: model.id,
+      organization: model.organization,
+      name: model.name,
+    };
+  });
+  return { models, error: null };
+}
+
 async function getMistralModels(apiKey = null) {
   const { OpenAI: OpenAIApi } = require("openai");
   const openai = new OpenAIApi({
@@ -447,6 +469,36 @@ async function getDeepSeekModels(apiKey = null) {
   return { models, error: null };
 }

+async function getXAIModels(_apiKey = null) {
+  const { OpenAI: OpenAIApi } = require("openai");
+  const apiKey =
+    _apiKey === true
+      ? process.env.XAI_LLM_API_KEY
+      : _apiKey || process.env.XAI_LLM_API_KEY || null;
+  const openai = new OpenAIApi({
+    baseURL: "https://api.x.ai/v1",
+    apiKey,
+  });
+  const models = await openai.models
+    .list()
+    .then((results) => results.data)
+    .catch((e) => {
+      console.error(`XAI:listModels`, e.message);
+      return [
+        {
+          created: 1725148800,
+          id: "grok-beta",
+          object: "model",
+          owned_by: "xai",
+        },
+      ];
+    });
+
+  // Api Key was successful so lets save it for future uses
+  if (models.length > 0 && !!apiKey) process.env.XAI_LLM_API_KEY = apiKey;
+  return { models, error: null };
+}
+
 module.exports = {
   getCustomModels,
 };
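A quick way to exercise the new branches; a sketch that assumes the relevant keys are already set in the environment:

const { getCustomModels } = require("./server/utils/helpers/customModels");

getCustomModels("xai", process.env.XAI_LLM_API_KEY).then(({ models, error }) => {
  if (error) return console.error(error);
  console.log(models.map((m) => m.id)); // e.g. ["grok-beta"]
});
getCustomModels("apipie", process.env.APIPIE_LLM_API_KEY).then(({ models }) =>
  console.log(models.length)
);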
@@ -162,6 +162,12 @@ function getLLMProvider({ provider = null, model = null } = {}) {
     case "deepseek":
       const { DeepSeekLLM } = require("../AiProviders/deepseek");
       return new DeepSeekLLM(embedder, model);
+    case "apipie":
+      const { ApiPieLLM } = require("../AiProviders/apipie");
+      return new ApiPieLLM(embedder, model);
+    case "xai":
+      const { XAiLLM } = require("../AiProviders/xai");
+      return new XAiLLM(embedder, model);
     default:
       throw new Error(
         `ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@@ -285,6 +291,15 @@ function getLLMProviderClass({ provider = null } = {}) {
     case "bedrock":
       const { AWSBedrockLLM } = require("../AiProviders/bedrock");
       return AWSBedrockLLM;
+    case "deepseek":
+      const { DeepSeekLLM } = require("../AiProviders/deepseek");
+      return DeepSeekLLM;
+    case "apipie":
+      const { ApiPieLLM } = require("../AiProviders/apipie");
+      return ApiPieLLM;
+    case "xai":
+      const { XAiLLM } = require("../AiProviders/xai");
+      return XAiLLM;
     default:
       return null;
   }
@@ -469,6 +469,10 @@ const KEY_MAPPING = {
     envKey: "AGENT_SEARXNG_API_URL",
     checks: [],
   },
+  AgentTavilyApiKey: {
+    envKey: "AGENT_TAVILY_API_KEY",
+    checks: [],
+  },

   // TTS/STT Integration ENVS
   TextToSpeechProvider: {
@@ -502,6 +506,20 @@ const KEY_MAPPING = {
     checks: [],
   },

+  // OpenAI Generic TTS
+  TTSOpenAICompatibleKey: {
+    envKey: "TTS_OPEN_AI_COMPATIBLE_KEY",
+    checks: [],
+  },
+  TTSOpenAICompatibleVoiceModel: {
+    envKey: "TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL",
+    checks: [isNotEmpty],
+  },
+  TTSOpenAICompatibleEndpoint: {
+    envKey: "TTS_OPEN_AI_COMPATIBLE_ENDPOINT",
+    checks: [isValidURL],
+  },
+
   // DeepSeek Options
   DeepSeekApiKey: {
     envKey: "DEEPSEEK_API_KEY",
@@ -511,6 +529,26 @@ const KEY_MAPPING = {
     envKey: "DEEPSEEK_MODEL_PREF",
     checks: [isNotEmpty],
   },
+
+  // APIPie Options
+  ApipieLLMApiKey: {
+    envKey: "APIPIE_LLM_API_KEY",
+    checks: [isNotEmpty],
+  },
+  ApipieLLMModelPref: {
+    envKey: "APIPIE_LLM_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
+
+  // xAI Options
+  XAIApiKey: {
+    envKey: "XAI_LLM_API_KEY",
+    checks: [isNotEmpty],
+  },
+  XAIModelPref: {
+    envKey: "XAI_LLM_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
 };

 function isNotEmpty(input = "") {
@@ -575,6 +613,7 @@ function supportedTTSProvider(input = "") {
     "openai",
     "elevenlabs",
     "piper_local",
+    "generic-openai",
   ].includes(input);
   return validSelection ? null : `${input} is not a valid TTS provider.`;
 }
@@ -613,6 +652,8 @@ function supportedLLM(input = "") {
     "generic-openai",
     "bedrock",
     "deepseek",
+    "apipie",
+    "xai",
   ].includes(input);
   return validSelection ? null : `${input} is not a valid LLM provider.`;
 }
@@ -856,6 +897,8 @@ function dumpENV() {
     "ENABLE_HTTPS",
     "HTTPS_CERT_PATH",
     "HTTPS_KEY_PATH",
+    // Other Configuration Keys
+    "DISABLE_VIEW_CHAT_HISTORY",
   ];

   // Simple sanitization of each value to prevent ENV injection via newline or quote escaping.
server/utils/middleware/chatHistoryViewable.js (new file, 18 lines)
@@ -0,0 +1,18 @@
+/**
+ * A simple middleware that validates that the chat history is viewable,
+ * i.e. that the `DISABLE_VIEW_CHAT_HISTORY` environment variable is not set at all.
+ * @param {Request} request - The request object.
+ * @param {Response} response - The response object.
+ * @param {NextFunction} next - The next function.
+ */
+function chatHistoryViewable(_request, response, next) {
+  if ("DISABLE_VIEW_CHAT_HISTORY" in process.env)
+    return response
+      .status(422)
+      .send("This feature has been disabled by the administrator.");
+  next();
+}
+
+module.exports = {
+  chatHistoryViewable,
+};
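A sketch of how such a middleware would be mounted; the route path and `validatedRequest` guard are illustrative, not taken from this diff:

const {
  chatHistoryViewable,
} = require("./server/utils/middleware/chatHistoryViewable");

// Hypothetical Express wiring - any history-returning endpoint would chain it:
app.get(
  "/workspace/:slug/chats",
  [validatedRequest, chatHistoryViewable],
  async (_request, response) => {
    // ...look up and return the chat history here...
  }
);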