merge with master

timothycarambat 2024-05-21 14:48:16 -05:00
commit c65ab6d863
118 changed files with 3150 additions and 514 deletions

View File

@@ -67,9 +67,9 @@ Some cool features of AnythingLLM
 - Extremely efficient cost-saving measures for managing very large documents. You'll never pay to embed a massive document or transcript more than once. 90% more cost effective than other document chatbot solutions.
 - Full Developer API for custom integrations!
-### Supported LLMs, Embedders, Transcriptions models, and Vector Databases
+### Supported LLMs, Embedder Models, Speech models, and Vector Databases
-**Supported LLMs:**
+**Language Learning Models:**
 - [Any open-source llama.cpp compatible model](/server/storage/models/README.md#text-generation-llm-selection)
 - [OpenAI](https://openai.com)
@@ -88,9 +88,10 @@ Some cool features of AnythingLLM
 - [Groq](https://groq.com/)
 - [Cohere](https://cohere.com/)
 - [KoboldCPP](https://github.com/LostRuins/koboldcpp)
+- [LiteLLM](https://github.com/BerriAI/litellm)
 - [Text Generation Web UI](https://github.com/oobabooga/text-generation-webui)
-**Supported Embedding models:**
+**Embedder models:**
 - [AnythingLLM Native Embedder](/server/storage/models/README.md) (default)
 - [OpenAI](https://openai.com)
@@ -100,12 +101,22 @@ Some cool features of AnythingLLM
 - [LM Studio (all)](https://lmstudio.ai)
 - [Cohere](https://cohere.com/)
-**Supported Transcription models:**
+**Audio Transcription models:**
 - [AnythingLLM Built-in](https://github.com/Mintplex-Labs/anything-llm/tree/master/server/storage/models#audiovideo-transcription) (default)
 - [OpenAI](https://openai.com/)
-**Supported Vector Databases:**
+**TTS (text-to-speech) support:**
+- Native Browser Built-in (default)
+- [OpenAI TTS](https://platform.openai.com/docs/guides/text-to-speech/voice-options)
+- [ElevenLabs](https://elevenlabs.io/)
+**STT (speech-to-text) support:**
+- Native Browser Built-in (default)
+**Vector Databases:**
 - [LanceDB](https://github.com/lancedb/lancedb) (default)
 - [Astra DB](https://www.datastax.com/products/datastax-astra)
@@ -122,8 +133,9 @@ This monorepo consists of three main sections:
 - `frontend`: A viteJS + React frontend that you can run to easily create and manage all your content the LLM can use.
 - `server`: A NodeJS express server to handle all the interactions and do all the vectorDB management and LLM interactions.
-- `docker`: Docker instructions and build process + information for building from source.
 - `collector`: NodeJS express server that process and parses documents from the UI.
+- `docker`: Docker instructions and build process + information for building from source.
+- `embed`: Code specifically for generation of the [embed widget](./embed/README.md).
 ## 🛳 Self Hosting
@@ -132,9 +144,9 @@ Mintplex Labs & the community maintain a number of deployment methods, scripts,
 |----------------------------------------|----:|-----|---------------|------------|
 | [![Deploy on Docker][docker-btn]][docker-deploy] | [![Deploy on AWS][aws-btn]][aws-deploy] | [![Deploy on GCP][gcp-btn]][gcp-deploy] | [![Deploy on DigitalOcean][do-btn]][do-deploy] | [![Deploy on Render.com][render-btn]][render-deploy] |
-| Railway |
-| --------------------------------------------------- |
-| [![Deploy on Railway][railway-btn]][railway-deploy] |
+| Railway | RepoCloud |
+| --- | --- |
+| [![Deploy on Railway][railway-btn]][railway-deploy] | [![Deploy on RepoCloud][repocloud-btn]][repocloud-deploy] |
 [or set up a production AnythingLLM instance without Docker →](./BARE_METAL.md)
@@ -223,3 +235,5 @@ This project is [MIT](./LICENSE) licensed.
 [render-deploy]: https://render.com/deploy?repo=https://github.com/Mintplex-Labs/anything-llm&branch=render
 [railway-btn]: https://railway.app/button.svg
 [railway-deploy]: https://railway.app/template/HNSCS1?referralCode=WFgJkn
+[repocloud-btn]: https://d16t0pc4846x52.cloudfront.net/deploylobe.svg
+[repocloud-deploy]: https://repocloud.io/details/?app_id=276

View File

@@ -1,19 +1,23 @@
 const fs = require("fs");
 const path = require("path");
 const { v4 } = require("uuid");
+const defaultWhisper = "Xenova/whisper-small"; // Model Card: https://huggingface.co/Xenova/whisper-small
+const fileSize = {
+  "Xenova/whisper-small": "250mb",
+  "Xenova/whisper-large": "1.56GB",
+};
 class LocalWhisper {
-  constructor() {
-    // Model Card: https://huggingface.co/Xenova/whisper-small
-    this.model = "Xenova/whisper-small";
+  constructor({ options }) {
+    this.model = options?.WhisperModelPref ?? defaultWhisper;
+    this.fileSize = fileSize[this.model];
     this.cacheDir = path.resolve(
       process.env.STORAGE_DIR
         ? path.resolve(process.env.STORAGE_DIR, `models`)
         : path.resolve(__dirname, `../../../server/storage/models`)
     );
-    this.modelPath = path.resolve(this.cacheDir, "Xenova", "whisper-small");
+    this.modelPath = path.resolve(this.cacheDir, ...this.model.split("/"));
     // Make directory when it does not exist in existing installations
     if (!fs.existsSync(this.cacheDir))
       fs.mkdirSync(this.cacheDir, { recursive: true });
@@ -104,7 +108,7 @@ class LocalWhisper {
   async client() {
     if (!fs.existsSync(this.modelPath)) {
       this.#log(
-        `The native whisper model has never been run and will be downloaded right now. Subsequent runs will be faster. (~250MB)`
+        `The native whisper model has never been run and will be downloaded right now. Subsequent runs will be faster. (~${this.fileSize})`
       );
     }
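
A quick usage sketch of the updated constructor (hedged: the require path and export shape are assumptions; the options key mirrors the new WhisperModelPref setting):

// Hypothetical caller — the collector would pass the persisted system settings.
const { LocalWhisper } = require("./localWhisper"); // path/export are assumptions
const whisper = new LocalWhisper({
  options: { WhisperModelPref: "Xenova/whisper-large" },
});
// whisper.model    === "Xenova/whisper-large"
// whisper.fileSize === "1.56GB" — surfaced in the first-download log line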

View File

@@ -12,18 +12,23 @@ const {
 function validSpaceUrl(spaceUrl = "") {
   // Atlassian default URL match
   const atlassianPattern = new UrlPattern(
-    "https\\://(:subdomain).atlassian.net/wiki/spaces/(:spaceKey)/*"
+    "https\\://(:subdomain).atlassian.net/wiki/spaces/(:spaceKey)*"
   );
   const atlassianMatch = atlassianPattern.match(spaceUrl);
   if (atlassianMatch) {
     return { valid: true, result: atlassianMatch };
   }
-  // Custom Confluence URL match
-  const customPattern = new UrlPattern(
-    "https\\://(:subdomain.):domain.:tld/wiki/spaces/(:spaceKey)/*"
-  );
-  const customMatch = customPattern.match(spaceUrl);
+  let customMatch = null;
+  [
+    "https\\://(:subdomain.):domain.:tld/wiki/spaces/(:spaceKey)*", // Custom Confluence space
+    "https\\://(:subdomain.):domain.:tld/display/(:spaceKey)*", // Custom Confluence space + Human-readable space tag.
+  ].forEach((matchPattern) => {
+    if (!!customMatch) return;
+    const pattern = new UrlPattern(matchPattern);
+    customMatch = pattern.match(spaceUrl);
+  });
   if (customMatch) {
     customMatch.customDomain =
       (customMatch.subdomain ? `${customMatch.subdomain}.` : "") + //
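
Illustrative checks of what the loosened patterns now accept (a sketch; exact match objects depend on the url-pattern library):

// Atlassian cloud space roots now match since the trailing "/*" was dropped:
validSpaceUrl("https://mysite.atlassian.net/wiki/spaces/ENG"); // { valid: true, ... }
// Self-hosted Confluence, including the human-readable /display/ form:
validSpaceUrl("https://wiki.example.com/wiki/spaces/ENG"); // { valid: true, ... }
validSpaceUrl("https://wiki.example.com/display/ENG"); // { valid: true, ... }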

View File

@@ -82,6 +82,12 @@ GID='1000'
 # GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096
 # GENERIC_OPEN_AI_API_KEY=sk-123abc
+# LLM_PROVIDER='litellm'
+# LITE_LLM_MODEL_PREF='gpt-3.5-turbo'
+# LITE_LLM_MODEL_TOKEN_LIMIT=4096
+# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
+# LITE_LLM_API_KEY='sk-123abc'
 # LLM_PROVIDER='cohere'
 # COHERE_API_KEY=
 # COHERE_MODEL_PREF='command-r'
@@ -118,6 +124,10 @@ GID='1000'
 # COHERE_API_KEY=
 # EMBEDDING_MODEL_PREF='embed-english-v3.0'
+# EMBEDDING_ENGINE='voyageai'
+# VOYAGEAI_API_KEY=
+# EMBEDDING_MODEL_PREF='voyage-large-2-instruct'
 ###########################################
 ######## Vector Database Selection ########
 ###########################################

View File

@@ -1,4 +1,8 @@
 <!doctype html>
+<head>
+  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+</head>
 <html lang="en">
   <body>

View File

@@ -28,18 +28,18 @@ export default function App() {
   const position = embedSettings.position || "bottom-right";
   const windowWidth = embedSettings.windowWidth
-    ? `md:max-w-[${embedSettings.windowWidth}]`
-    : "md:max-w-[400px]";
+    ? `max-w-[${embedSettings.windowWidth}]`
+    : "max-w-[400px]";
   const windowHeight = embedSettings.windowHeight
-    ? `md:max-h-[${embedSettings.windowHeight}]`
-    : "md:max-h-[700px]";
+    ? `max-h-[${embedSettings.windowHeight}]`
+    : "max-h-[700px]";
   return (
     <>
       <Head />
       <div className={`fixed inset-0 z-50 ${isChatOpen ? "block" : "hidden"}`}>
         <div
-          className={`${windowHeight} ${windowWidth} h-full w-full bg-white md:fixed md:bottom-0 md:right-0 md:mb-4 md:mr-4 md:rounded-2xl md:border md:border-gray-300 md:shadow-[0_4px_14px_rgba(0,0,0,0.25)] ${positionClasses[position]}`}
+          className={`${windowHeight} ${windowWidth} h-full w-full bg-white fixed bottom-0 right-0 mb-4 md:mr-4 rounded-2xl border border-gray-300 shadow-[0_4px_14px_rgba(0,0,0,0.25)] ${positionClasses[position]}`}
           id="anything-llm-chat"
         >
           {isChatOpen && (

frontend/.gitignore vendored
View File

@@ -9,10 +9,8 @@ lerna-debug.log*
 node_modules
 dist
-lib
 dist-ssr
 *.local
-!frontend/components/lib
 # Editor directories and files
 .vscode/*

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,50 @@
export default function VoyageAiOptions({ settings }) {
return (
<div className="w-full flex flex-col gap-y-4">
<div className="w-full flex items-center gap-4">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
API Key
</label>
<input
type="password"
name="VoyageAiApiKey"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Voyage AI API Key"
defaultValue={settings?.VoyageAiApiKey ? "*".repeat(20) : ""}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Model Preference
</label>
<select
name="EmbeddingModelPref"
required={true}
defaultValue={settings?.EmbeddingModelPref}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
<optgroup label="Available embedding models">
{[
"voyage-large-2-instruct",
"voyage-law-2",
"voyage-code-2",
"voyage-large-2",
"voyage-2",
].map((model) => {
return (
<option key={model} value={model}>
{model}
</option>
);
})}
</optgroup>
</select>
</div>
</div>
</div>
);
}

View File

@@ -14,6 +14,8 @@ import {
 import React, { useEffect, useState } from "react";
 import SettingsButton from "../SettingsButton";
 import { isMobile } from "react-device-detect";
+import { Tooltip } from "react-tooltip";
+import { v4 } from "uuid";
 export const MAX_ICONS = 3;
 export const ICON_COMPONENTS = {
@@ -47,36 +49,48 @@ export default function Footer() {
   return (
     <div className="flex justify-center mb-2">
       <div className="flex space-x-4">
+        <ToolTipWrapper id="open-github">
           <a
             href={paths.github()}
             target="_blank"
             rel="noreferrer"
             className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
             aria-label="Find us on Github"
+            data-tooltip-id="open-github"
+            data-tooltip-content="View source code on Github"
           >
             <GithubLogo weight="fill" className="h-5 w-5 " />
           </a>
+        </ToolTipWrapper>
+        <ToolTipWrapper id="open-documentation">
           <a
             href={paths.docs()}
             target="_blank"
             rel="noreferrer"
-            className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
+            className="w-fit transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
             aria-label="Docs"
+            data-tooltip-id="open-documentation"
+            data-tooltip-content="Open AnythingLLM help docs"
           >
             <BookOpen weight="fill" className="h-5 w-5 " />
           </a>
+        </ToolTipWrapper>
+        <ToolTipWrapper id="open-discord">
           <a
             href={paths.discord()}
             target="_blank"
             rel="noreferrer"
             className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
             aria-label="Join our Discord server"
+            data-tooltip-id="open-discord"
+            data-tooltip-content="Join the AnythingLLM Discord"
           >
             <DiscordLogo
               weight="fill"
               className="h-5 w-5 stroke-slate-200 group-hover:stroke-slate-200"
             />
           </a>
+        </ToolTipWrapper>
         {!isMobile && <SettingsButton />}
       </div>
     </div>
@@ -105,3 +119,17 @@ export default function Footer() {
     </div>
   );
 }
+
+export function ToolTipWrapper({ id = v4(), children }) {
+  return (
+    <div className="flex w-fit">
+      {children}
+      <Tooltip
+        id={id}
+        place="top"
+        delayShow={300}
+        className="tooltip !text-xs z-99"
+      />
+    </div>
+  );
+}

View File

@@ -19,6 +19,7 @@ export default function GeminiLLMOptions({ settings }) {
       </div>
       {!settings?.credentialsOnly && (
+        <>
           <div className="flex flex-col w-60">
             <label className="text-white text-sm font-semibold block mb-4">
               Chat Model Selection
@@ -38,6 +39,27 @@
               })}
             </select>
           </div>
+          <div className="flex flex-col w-60">
+            <label className="text-white text-sm font-semibold block mb-4">
+              Safety Setting
+            </label>
+            <select
+              name="GeminiSafetySetting"
+              defaultValue={
+                settings?.GeminiSafetySetting || "BLOCK_MEDIUM_AND_ABOVE"
+              }
+              required={true}
+              className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+            >
+              <option value="BLOCK_NONE">None</option>
+              <option value="BLOCK_ONLY_HIGH">Block few</option>
+              <option value="BLOCK_MEDIUM_AND_ABOVE">
+                Block some (default)
+              </option>
+              <option value="BLOCK_LOW_AND_ABOVE">Block most</option>
+            </select>
+          </div>
+        </>
       )}
     </div>
   </div>
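
These option values line up with the harm-block thresholds in Google's Generative AI API. A hedged sketch of how a server could forward the stored preference (variable names are illustrative, not necessarily how this repo wires it up):

// Illustrative only: build safetySettings from the saved preference.
const threshold = settings?.GeminiSafetySetting ?? "BLOCK_MEDIUM_AND_ABOVE";
const safetySettings = [
  { category: "HARM_CATEGORY_HARASSMENT", threshold },
  { category: "HARM_CATEGORY_HATE_SPEECH", threshold },
  // ...one entry per harm category the API exposes
];
// genAI.getGenerativeModel({ model: modelName, safetySettings });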

View File

@@ -1,5 +1,6 @@
 export default function GenericOpenAiOptions({ settings }) {
   return (
+    <div className="flex flex-col gap-y-4">
       <div className="flex gap-4 flex-wrap">
         <div className="flex flex-col w-60">
           <label className="text-white text-sm font-semibold block mb-4">
@@ -45,6 +46,8 @@ export default function GenericOpenAiOptions({ settings }) {
             autoComplete="off"
           />
         </div>
+      </div>
+      <div className="flex gap-x-4 flex-wrap">
         <div className="flex flex-col w-60">
           <label className="text-white text-sm font-semibold block mb-4">
             Token context window
@@ -77,5 +80,6 @@ export default function GenericOpenAiOptions({ settings }) {
           />
         </div>
       </div>
+    </div>
   );
 }

View File

@@ -0,0 +1,148 @@
import { useEffect, useState } from "react";
import System from "@/models/system";
export default function LiteLLMOptions({ settings }) {
const [basePathValue, setBasePathValue] = useState(settings?.LiteLLMBasePath);
const [basePath, setBasePath] = useState(settings?.LiteLLMBasePath);
const [apiKeyValue, setApiKeyValue] = useState(settings?.LiteLLMAPIKey);
const [apiKey, setApiKey] = useState(settings?.LiteLLMAPIKey);
return (
<div className="w-full flex flex-col gap-y-4">
<div className="w-full flex items-center gap-4">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Base URL
</label>
<input
type="url"
name="LiteLLMBasePath"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="http://127.0.0.1:4000"
defaultValue={settings?.LiteLLMBasePath}
required={true}
autoComplete="off"
spellCheck={false}
onChange={(e) => setBasePathValue(e.target.value)}
onBlur={() => setBasePath(basePathValue)}
/>
</div>
<LiteLLMModelSelection
settings={settings}
basePath={basePath}
apiKey={apiKey}
/>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Token context window
</label>
<input
type="number"
name="LiteLLMTokenLimit"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="4096"
min={1}
onScroll={(e) => e.target.blur()}
defaultValue={settings?.LiteLLMTokenLimit}
required={true}
autoComplete="off"
/>
</div>
</div>
<div className="w-full flex items-center gap-4">
<div className="flex flex-col w-60">
<div className="flex flex-col gap-y-1 mb-4">
<label className="text-white text-sm font-semibold flex items-center gap-x-2">
API Key <p className="!text-xs !italic !font-thin">optional</p>
</label>
</div>
<input
type="password"
name="LiteLLMAPIKey"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="sk-mysecretkey"
defaultValue={settings?.LiteLLMAPIKey ? "*".repeat(20) : ""}
autoComplete="off"
spellCheck={false}
onChange={(e) => setApiKeyValue(e.target.value)}
onBlur={() => setApiKey(apiKeyValue)}
/>
</div>
</div>
</div>
);
}
function LiteLLMModelSelection({ settings, basePath = null, apiKey = null }) {
const [customModels, setCustomModels] = useState([]);
const [loading, setLoading] = useState(true);
useEffect(() => {
async function findCustomModels() {
if (!basePath) {
setCustomModels([]);
setLoading(false);
return;
}
setLoading(true);
const { models } = await System.customModels(
"litellm",
typeof apiKey === "boolean" ? null : apiKey,
basePath
);
setCustomModels(models || []);
setLoading(false);
}
findCustomModels();
}, [basePath, apiKey]);
if (loading || customModels.length == 0) {
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="LiteLLMModelPref"
disabled={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
<option disabled={true} selected={true}>
{basePath?.includes("/v1")
? "-- loading available models --"
: "-- waiting for URL --"}
</option>
</select>
</div>
);
}
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="LiteLLMModelPref"
required={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{customModels.length > 0 && (
<optgroup label="Your loaded models">
{customModels.map((model) => {
return (
<option
key={model.id}
value={model.id}
selected={settings.LiteLLMModelPref === model.id}
>
{model.id}
</option>
);
})}
</optgroup>
)}
</select>
</div>
);
}

View File

@@ -3,7 +3,7 @@ import System from "@/models/system";
 import showToast from "@/utils/toast";
 import pluralize from "pluralize";
 import { TagsInput } from "react-tag-input-component";
-import { Warning } from "@phosphor-icons/react";
+import { Info, Warning } from "@phosphor-icons/react";
 import { Tooltip } from "react-tooltip";
 const DEFAULT_BRANCHES = ["main", "master"];
@@ -92,45 +92,7 @@ export default function GithubOptions() {
               <p className="font-bold text-white">Github Access Token</p>{" "}
               <p className="text-xs text-white/50 font-light flex items-center">
                 optional
-                {!accessToken && (
-                  <Warning
-                    size={14}
-                    className="ml-1 text-orange-500 cursor-pointer"
-                    data-tooltip-id="access-token-tooltip"
-                    data-tooltip-place="right"
-                  />
-                )}
-                <Tooltip
-                  delayHide={300}
-                  id="access-token-tooltip"
-                  className="max-w-xs"
-                  clickable={true}
-                >
-                  <p className="text-sm">
-                    Without a{" "}
-                    <a
-                      href="https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens"
-                      rel="noreferrer"
-                      target="_blank"
-                      className="underline"
-                      onClick={(e) => e.stopPropagation()}
-                    >
-                      Personal Access Token
-                    </a>
-                    , the GitHub API may limit the number of files that
-                    can be collected due to rate limits. You can{" "}
-                    <a
-                      href="https://github.com/settings/personal-access-tokens/new"
-                      rel="noreferrer"
-                      target="_blank"
-                      className="underline"
-                      onClick={(e) => e.stopPropagation()}
-                    >
-                      create a temporary Access Token
-                    </a>{" "}
-                    to avoid this issue.
-                  </p>
-                </Tooltip>
+                <PATTooltip accessToken={accessToken} />
               </p>
             </label>
             <p className="text-xs font-normal text-white/50">
@@ -180,6 +142,7 @@ export default function GithubOptions() {
           </div>
           <div className="flex flex-col gap-y-2 w-full pr-10">
+            <PATAlert accessToken={accessToken} />
             <button
               type="submit"
               disabled={loading}
@@ -269,3 +232,78 @@ function GitHubBranchSelection({ repo, accessToken }) {
     </div>
   );
 }
+
+function PATAlert({ accessToken }) {
+  if (!!accessToken) return null;
+  return (
+    <div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-4 bg-blue-800/30 w-fit rounded-lg px-4 py-2">
+      <div className="gap-x-2 flex items-center">
+        <Info className="shrink-0" size={25} />
+        <p className="text-sm">
+          Without filling out the <b>Github Access Token</b> this data connector
+          will only be able to collect the <b>top-level</b> files of the repo
+          due to GitHub's public API rate-limits.
+          <br />
+          <br />
+          <a
+            href="https://github.com/settings/personal-access-tokens/new"
+            rel="noreferrer"
+            target="_blank"
+            className="underline"
+            onClick={(e) => e.stopPropagation()}
+          >
+            {" "}
+            Get a free Personal Access Token with a GitHub account here.
+          </a>
+        </p>
+      </div>
+    </div>
+  );
+}
+
+function PATTooltip({ accessToken }) {
+  if (!!accessToken) return null;
+  return (
+    <>
+      {!accessToken && (
+        <Warning
+          size={14}
+          className="ml-1 text-orange-500 cursor-pointer"
+          data-tooltip-id="access-token-tooltip"
+          data-tooltip-place="right"
+        />
+      )}
+      <Tooltip
+        delayHide={300}
+        id="access-token-tooltip"
+        className="max-w-xs"
+        clickable={true}
+      >
+        <p className="text-sm">
+          Without a{" "}
+          <a
+            href="https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens"
+            rel="noreferrer"
+            target="_blank"
+            className="underline"
+            onClick={(e) => e.stopPropagation()}
+          >
+            Personal Access Token
+          </a>
+          , the GitHub API may limit the number of files that can be collected
+          due to rate limits. You can{" "}
+          <a
+            href="https://github.com/settings/personal-access-tokens/new"
+            rel="noreferrer"
+            target="_blank"
+            className="underline"
+            onClick={(e) => e.stopPropagation()}
+          >
+            create a temporary Access Token
+          </a>{" "}
+          to avoid this issue.
+        </p>
+      </Tooltip>
+    </>
+  );
+}

View File

@@ -3,6 +3,7 @@ import paths from "@/utils/paths";
 import { ArrowUUpLeft, Wrench } from "@phosphor-icons/react";
 import { Link } from "react-router-dom";
 import { useMatch } from "react-router-dom";
+import { ToolTipWrapper } from "../Footer";
 export default function SettingsButton() {
   const isInSettings = !!useMatch("/settings/*");
@@ -12,22 +13,32 @@ export default function SettingsButton() {
   if (isInSettings)
     return (
+      <ToolTipWrapper id="go-home">
         <Link
           to={paths.home()}
           className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
           aria-label="Home"
+          data-tooltip-id="go-home"
+          data-tooltip-content="Back to workspaces"
        >
           <ArrowUUpLeft className="h-5 w-5" weight="fill" />
         </Link>
+      </ToolTipWrapper>
     );
   return (
+    <ToolTipWrapper id="open-settings">
       <Link
-        to={!!user?.role ? paths.settings.system() : paths.settings.appearance()}
+        to={
+          !!user?.role ? paths.settings.system() : paths.settings.appearance()
+        }
         className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
         aria-label="Settings"
+        data-tooltip-id="open-settings"
+        data-tooltip-content="Open settings"
       >
         <Wrench className="h-5 w-5" weight="fill" />
       </Link>
+    </ToolTipWrapper>
   );
 }

View File

@@ -329,7 +329,7 @@ const SidebarOptions = ({ user = null }) => (
     <Option
       href={paths.settings.embedSetup()}
       childLinks={[paths.settings.embedChats()]}
-      btnText="Embedded Chat"
+      btnText="Chat Embed Widgets"
      icon={<CodeBlock className="h-5 w-5 flex-shrink-0" />}
      user={user}
      flex={true}
@@ -338,7 +338,7 @@ const SidebarOptions = ({ user = null }) => (
     <>
       <Option
         href={paths.settings.embedChats()}
-        btnText="Embedded Chat History"
+        btnText="Chat Embed History"
         icon={<Barcode className="h-5 w-5 flex-shrink-0" />}
         user={user}
         flex={true}

View File

@@ -84,6 +84,7 @@ function ElevenLabsModelSelection({ apiKey, settings }) {
       <select
         name="TTSElevenLabsVoiceModel"
         required={true}
+        defaultValue={settings?.TTSElevenLabsVoiceModel}
         className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
       >
         {Object.keys(groupedModels)
@@ -91,11 +92,7 @@ function ElevenLabsModelSelection({ apiKey, settings }) {
           .map((organization) => (
             <optgroup key={organization} label={organization}>
               {groupedModels[organization].map((model) => (
-                <option
-                  key={model.id}
-                  value={model.id}
-                  selected={settings?.OpenAiModelPref === model.id}
-                >
+                <option key={model.id} value={model.id}>
                   {model.name}
                 </option>
               ))}

View File

@@ -35,7 +35,11 @@ export default function OpenAiTextToSpeechOptions({ settings }) {
       >
         {["alloy", "echo", "fable", "onyx", "nova", "shimmer"].map(
           (voice) => {
-            return <option value={voice}>{toProperCase(voice)}</option>;
+            return (
+              <option key={voice} value={voice}>
+                {toProperCase(voice)}
+              </option>
+            );
           }
         )}
       </select>

View File

@@ -1,38 +1,89 @@
 import { Gauge } from "@phosphor-icons/react";
+import { useState } from "react";
-export default function NativeTranscriptionOptions() {
+export default function NativeTranscriptionOptions({ settings }) {
+  const [model, setModel] = useState(settings?.WhisperModelPref);
   return (
     <div className="w-full flex flex-col gap-y-4">
-      <div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-4 bg-blue-800/30 w-fit rounded-lg px-4 py-2">
-        <div className="gap-x-2 flex items-center">
-          <Gauge size={25} />
-          <p className="text-sm">
-            Using the local whisper model on machines with limited RAM or CPU
-            can stall AnythingLLM when processing media files.
-            <br />
-            We recommend at least 2GB of RAM and upload files &lt;10Mb.
-            <br />
-            <br />
-            <i>
-              The built-in model will automatically download on the first use.
-            </i>
-          </p>
-        </div>
-      </div>
+      <LocalWarning model={model} />
       <div className="w-full flex items-center gap-4">
         <div className="flex flex-col w-60">
           <label className="text-white text-sm font-semibold block mb-4">
             Model Selection
           </label>
           <select
-            disabled={true}
+            name="WhisperModelPref"
+            defaultValue={model}
+            onChange={(e) => setModel(e.target.value)}
             className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
           >
-            <option disabled={true} selected={true}>
-              Xenova/whisper-small
-            </option>
+            {["Xenova/whisper-small", "Xenova/whisper-large"].map(
+              (value, i) => {
+                return (
+                  <option key={i} value={value}>
+                    {value}
+                  </option>
+                );
+              }
+            )}
           </select>
         </div>
       </div>
     </div>
   );
 }
+
+function LocalWarning({ model }) {
+  switch (model) {
+    case "Xenova/whisper-small":
+      return <WhisperSmall />;
+    case "Xenova/whisper-large":
+      return <WhisperLarge />;
+    default:
+      return <WhisperSmall />;
+  }
+}
+
+function WhisperSmall() {
+  return (
+    <div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-4 bg-blue-800/30 w-fit rounded-lg px-4 py-2">
+      <div className="gap-x-2 flex items-center">
+        <Gauge size={25} />
+        <p className="text-sm">
+          Running the <b>whisper-small</b> model on a machine with limited RAM
+          or CPU can stall AnythingLLM when processing media files.
+          <br />
+          We recommend at least 2GB of RAM and upload files &lt;10Mb.
+          <br />
+          <br />
+          <i>
+            This model will automatically download on the first use. (250mb)
+          </i>
+        </p>
+      </div>
+    </div>
+  );
+}
+
+function WhisperLarge() {
+  return (
+    <div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-4 bg-blue-800/30 w-fit rounded-lg px-4 py-2">
+      <div className="gap-x-2 flex items-center">
+        <Gauge size={25} />
+        <p className="text-sm">
+          Using the <b>whisper-large</b> model on machines with limited RAM or
+          CPU can stall AnythingLLM when processing media files. This model is
+          substantially larger than the whisper-small.
+          <br />
+          We recommend at least 8GB of RAM and upload files &lt;10Mb.
+          <br />
+          <br />
+          <i>
+            This model will automatically download on the first use. (1.56GB)
+          </i>
+        </p>
+      </div>
+    </div>
+  );
+}

View File

@@ -115,6 +115,11 @@ function SkeletonLine() {
   );
 }
+function omitChunkHeader(text) {
+  if (!text.startsWith("<document_metadata>")) return text;
+  return text.split("</document_metadata>")[1].trim();
+}
 function CitationDetailModal({ source, onClose }) {
   const { references, title, chunks } = source;
   const { isUrl, text: webpageUrl, href: linkTo } = parseChunkSource(source);
@@ -167,7 +172,7 @@ function CitationDetailModal({ source, onClose }) {
         <div key={idx} className="pt-6 text-white">
           <div className="flex flex-col w-full justify-start pb-6 gap-y-1">
             <p className="text-white whitespace-pre-line">
-              {HTMLDecode(text)}
+              {HTMLDecode(omitChunkHeader(text))}
             </p>
             {!!score && (
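
For reference, what omitChunkHeader strips before display (the metadata body here is a made-up example):

// Chunks are stored with a <document_metadata> preamble that should not be
// shown to the user inside the citation modal:
const chunk =
  "<document_metadata>\nsourceDocument: report.pdf\n</document_metadata>\n\nThe cited passage...";
omitChunkHeader(chunk); // => "The cited passage..."
omitChunkHeader("plain text"); // => "plain text" (no header, returned as-is)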

View File

@@ -32,8 +32,7 @@ const Actions = ({
     <div className="flex w-full justify-between items-center">
       <div className="flex justify-start items-center gap-x-4">
         <CopyMessage message={message} />
-        {isLastMessage &&
-          !message?.includes("Workspace chat memory was reset!") && (
+        {isLastMessage && (
           <RegenerateMessage
             regenerateMessage={regenerateMessage}
             slug={slug}
@@ -127,6 +126,7 @@ function CopyMessage({ message }) {
 }
 function RegenerateMessage({ regenerateMessage, chatId }) {
+  if (!chatId) return null;
   return (
     <div className="mt-3 relative">
       <button

View File

@@ -57,7 +57,7 @@ const HistoricalMessage = ({
       <div className="flex gap-x-5">
         <div className="relative w-[35px] h-[35px] rounded-full flex-shrink-0 overflow-hidden" />
         <Actions
-          message={DOMPurify.sanitize(message)}
+          message={message}
           feedbackScore={feedbackScore}
           chatId={chatId}
           slug={workspace?.slug}

View File

@@ -31,10 +31,7 @@ export default function EditPresetModal({
   };
   const handleDelete = async () => {
-    const confirmDelete = window.confirm(
-      "Are you sure you want to delete this preset?"
-    );
-    if (!confirmDelete) return;
+    if (!window.confirm("Are you sure you want to delete this preset?")) return;
     setDeleting(true);
     await onDelete(preset.id);

View File

@@ -3,7 +3,6 @@ import SlashCommandsButton, {
   SlashCommands,
   useSlashCommands,
 } from "./SlashCommands";
-import { isMobile } from "react-device-detect";
 import debounce from "lodash.debounce";
 import { PaperPlaneRight } from "@phosphor-icons/react";
 import StopGenerationButton from "./StopGenerationButton";
@@ -13,6 +12,7 @@ import AvailableAgentsButton, {
 } from "./AgentMenu";
 import TextSizeButton from "./TextSizeMenu";
 import SpeechToText from "./SpeechToText";
+import { Tooltip } from "react-tooltip";
 export const PROMPT_INPUT_EVENT = "set_prompt_input";
 export default function PromptInput({
@@ -83,7 +83,6 @@ export default function PromptInput({
   };
   const adjustTextArea = (event) => {
-    if (isMobile) return false;
     const element = event.target;
     element.style.height = "auto";
     element.style.height = `${element.scrollHeight}px`;
@@ -130,20 +129,31 @@ export default function PromptInput({
             adjustTextArea(e);
           }}
           value={promptInput}
-          className="cursor-text max-h-[100px] md:min-h-[40px] mx-2 md:mx-0 py-2 w-full text-[16px] md:text-md text-white bg-transparent placeholder:text-white/60 resize-none active:outline-none focus:outline-none flex-grow"
+          className="cursor-text max-h-[50vh] md:max-h-[350px] md:min-h-[40px] mx-2 md:mx-0 py-2 w-full text-[16px] md:text-md text-white bg-transparent placeholder:text-white/60 resize-none active:outline-none focus:outline-none flex-grow"
           placeholder={"Send a message"}
         />
         {buttonDisabled ? (
           <StopGenerationButton />
         ) : (
+          <>
             <button
               ref={formRef}
               type="submit"
               className="inline-flex justify-center rounded-2xl cursor-pointer text-white/60 hover:text-white group ml-4"
+              data-tooltip-id="send-prompt"
+              data-tooltip-content="Send prompt message to workspace"
+              aria-label="Send prompt message to workspace"
             >
               <PaperPlaneRight className="w-7 h-7 my-3" weight="fill" />
               <span className="sr-only">Send message</span>
             </button>
+            <Tooltip
+              id="send-prompt"
+              place="bottom"
+              delayShow={300}
+              className="tooltip !text-xs z-99"
+            />
+          </>
         )}
       </div>
       <div className="flex justify-between py-3.5">

Binary file not shown. (new image, 20 KiB)

Binary file not shown. (new image, 49 KiB)

View File

@@ -47,7 +47,7 @@ export default function TextToSpeechProvider({ settings }) {
   const searchInputRef = useRef(null);
   const handleSubmit = async (e) => {
-    e.preventDefault();
+    e?.preventDefault();
     const form = e.target;
     const data = { TextToSpeechProvider: selectedProvider };
     const formData = new FormData(form);
@@ -110,10 +110,7 @@ export default function TextToSpeechProvider({ settings }) {
         </div>
         <div className="w-full justify-end flex">
           {hasChanges && (
-            <CTAButton
-              onClick={() => handleSubmit()}
-              className="mt-3 mr-0 -mb-14 z-10"
-            >
+            <CTAButton className="mt-3 mr-0 -mb-14 z-10">
               {saving ? "Saving..." : "Save changes"}
             </CTAButton>
           )}

View File

@@ -10,6 +10,8 @@ import LocalAiLogo from "@/media/llmprovider/localai.png";
 import OllamaLogo from "@/media/llmprovider/ollama.png";
 import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import CohereLogo from "@/media/llmprovider/cohere.png";
+import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png";
 import PreLoader from "@/components/Preloader";
 import ChangeWarningModal from "@/components/ChangeWarning";
 import OpenAiOptions from "@/components/EmbeddingSelection/OpenAiOptions";
@@ -19,6 +21,7 @@ import NativeEmbeddingOptions from "@/components/EmbeddingSelection/NativeEmbedd
 import OllamaEmbeddingOptions from "@/components/EmbeddingSelection/OllamaOptions";
 import LMStudioEmbeddingOptions from "@/components/EmbeddingSelection/LMStudioOptions";
 import CohereEmbeddingOptions from "@/components/EmbeddingSelection/CohereOptions";
+import VoyageAiOptions from "@/components/EmbeddingSelection/VoyageAiOptions";
 import EmbedderItem from "@/components/EmbeddingSelection/EmbedderItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -78,6 +81,13 @@ const EMBEDDERS = [
     options: (settings) => <CohereEmbeddingOptions settings={settings} />,
     description: "Run powerful embedding models from Cohere.",
   },
+  {
+    name: "Voyage AI",
+    value: "voyageai",
+    logo: VoyageAiLogo,
+    options: (settings) => <VoyageAiOptions settings={settings} />,
+    description: "Run powerful embedding models from Voyage AI.",
+  },
 ];
 export default function GeneralEmbeddingPreference() {

View File

@@ -20,6 +20,7 @@ import GroqLogo from "@/media/llmprovider/groq.png";
 import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
 import CohereLogo from "@/media/llmprovider/cohere.png";
+import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@@ -36,12 +37,13 @@ import PerplexityOptions from "@/components/LLMSelection/PerplexityOptions";
 import OpenRouterOptions from "@/components/LLMSelection/OpenRouterOptions";
 import GroqAiOptions from "@/components/LLMSelection/GroqAiOptions";
 import CohereAiOptions from "@/components/LLMSelection/CohereAiOptions";
+import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
+import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
+import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
 import CTAButton from "@/components/lib/CTAButton";
-import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
-import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
 export const AVAILABLE_LLM_PROVIDERS = [
   {
@@ -184,6 +186,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
     description: "Run Cohere's powerful Command models.",
     requiredConfig: ["CohereApiKey"],
   },
+  {
+    name: "LiteLLM",
+    value: "litellm",
+    logo: LiteLLMLogo,
+    options: (settings) => <LiteLLMOptions settings={settings} />,
+    description: "Run LiteLLM's OpenAI compatible proxy for various LLMs.",
+    requiredConfig: ["LiteLLMBasePath"],
+  },
   {
     name: "Generic OpenAI",
     value: "generic-openai",

View File

@@ -12,6 +12,23 @@ import LLMItem from "@/components/LLMSelection/LLMItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
 import CTAButton from "@/components/lib/CTAButton";
+const PROVIDERS = [
+  {
+    name: "OpenAI",
+    value: "openai",
+    logo: OpenAiLogo,
+    options: (settings) => <OpenAiWhisperOptions settings={settings} />,
+    description: "Leverage the OpenAI Whisper-large model using your API key.",
+  },
+  {
+    name: "AnythingLLM Built-In",
+    value: "local",
+    logo: AnythingLLMIcon,
+    options: (settings) => <NativeTranscriptionOptions settings={settings} />,
+    description: "Run a built-in whisper model on this instance privately.",
+  },
+];
 export default function TranscriptionModelPreference() {
   const [saving, setSaving] = useState(false);
   const [hasChanges, setHasChanges] = useState(false);
@@ -68,24 +85,6 @@ export default function TranscriptionModelPreference() {
     fetchKeys();
   }, []);
-  const PROVIDERS = [
-    {
-      name: "OpenAI",
-      value: "openai",
-      logo: OpenAiLogo,
-      options: <OpenAiWhisperOptions settings={settings} />,
-      description:
-        "Leverage the OpenAI Whisper-large model using your API key.",
-    },
-    {
-      name: "AnythingLLM Built-In",
-      value: "local",
-      logo: AnythingLLMIcon,
-      options: <NativeTranscriptionOptions settings={settings} />,
-      description: "Run a built-in whisper model on this instance privately.",
-    },
-  ];
   useEffect(() => {
     const filtered = PROVIDERS.filter((provider) =>
       provider.name.toLowerCase().includes(searchQuery.toLowerCase())
@@ -228,7 +227,7 @@ export default function TranscriptionModelPreference() {
           {selectedProvider &&
             PROVIDERS.find(
               (provider) => provider.value === selectedProvider
-            )?.options}
+            )?.options(settings)}
         </div>
       </div>
     </form>
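
Making each provider's options a function of settings is what allows PROVIDERS to move to module scope: the array no longer closes over component state at definition time. Render-side, the selected provider is invoked with the current settings (names as in the diff above):

// The freshest settings are passed at render time instead of being captured
// when the array was built:
const provider = PROVIDERS.find((p) => p.value === selectedProvider);
provider?.options(settings); // e.g. <NativeTranscriptionOptions settings={settings} />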

View File

@@ -17,6 +17,8 @@ import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
 import GroqLogo from "@/media/llmprovider/groq.png";
 import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
+import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import ZillizLogo from "@/media/vectordbs/zilliz.png";
 import AstraDBLogo from "@/media/vectordbs/astraDB.png";
@@ -26,6 +28,8 @@ import LanceDbLogo from "@/media/vectordbs/lancedb.png";
 import WeaviateLogo from "@/media/vectordbs/weaviate.png";
 import QDrantLogo from "@/media/vectordbs/qdrant.png";
 import MilvusLogo from "@/media/vectordbs/milvus.png";
+import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png";
 import React, { useState, useEffect } from "react";
 import paths from "@/utils/paths";
 import { useNavigate } from "react-router-dom";
@@ -168,6 +172,13 @@ export const LLM_SELECTION_PRIVACY = {
     ],
     logo: CohereLogo,
   },
+  litellm: {
+    name: "LiteLLM",
+    description: [
+      "Your model and chats are only accessible on the server running LiteLLM",
+    ],
+    logo: LiteLLMLogo,
+  },
 };
 export const VECTOR_DB_PRIVACY = {
@@ -283,6 +294,13 @@ export const EMBEDDING_ENGINE_PRIVACY = {
     ],
     logo: CohereLogo,
   },
+  voyageai: {
+    name: "Voyage AI",
+    description: [
+      "Data sent to Voyage AI's servers is shared according to the terms of service of voyageai.com.",
+    ],
+    logo: VoyageAiLogo,
+  },
 };
 export default function DataHandling({ setHeader, setForwardBtn, setBackBtn }) {

View File

@@ -16,6 +16,8 @@ import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
 import GroqLogo from "@/media/llmprovider/groq.png";
 import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
+import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@@ -32,14 +34,15 @@ import PerplexityOptions from "@/components/LLMSelection/PerplexityOptions";
 import OpenRouterOptions from "@/components/LLMSelection/OpenRouterOptions";
 import GroqAiOptions from "@/components/LLMSelection/GroqAiOptions";
 import CohereAiOptions from "@/components/LLMSelection/CohereAiOptions";
+import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
+import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
+import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import System from "@/models/system";
 import paths from "@/utils/paths";
 import showToast from "@/utils/toast";
 import { useNavigate } from "react-router-dom";
-import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
-import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
 const TITLE = "LLM Preference";
 const DESCRIPTION =
@@ -162,6 +165,13 @@ const LLMS = [
     options: (settings) => <CohereAiOptions settings={settings} />,
     description: "Run Cohere's powerful Command models.",
   },
+  {
+    name: "LiteLLM",
+    value: "litellm",
+    logo: LiteLLMLogo,
+    options: (settings) => <LiteLLMOptions settings={settings} />,
+    description: "Run LiteLLM's OpenAI compatible proxy for various LLMs.",
+  },
   {
     name: "Generic OpenAI",
     value: "generic-openai",

View File

@@ -0,0 +1,47 @@
import PostgreSQLLogo from "./icons/postgresql.png";
import MySQLLogo from "./icons/mysql.png";
import MSSQLLogo from "./icons/mssql.png";
import { X } from "@phosphor-icons/react";
export const DB_LOGOS = {
postgresql: PostgreSQLLogo,
mysql: MySQLLogo,
"sql-server": MSSQLLogo,
};
export default function DBConnection({ connection, onRemove }) {
const { database_id, engine } = connection;
function removeConfirmation() {
if (
!window.confirm(
`Delete ${database_id} from the list of available SQL connections? This cannot be undone.`
)
) {
return false;
}
onRemove(database_id);
}
return (
<div className="flex gap-x-4 items-center">
<img
src={DB_LOGOS?.[engine] ?? null}
alt={`${engine} logo`}
className="w-10 h-10 rounded-md"
/>
<div className="flex w-full items-center justify-between">
<div className="flex flex-col">
<div className="text-sm font-semibold text-white">{database_id}</div>
<div className="mt-1 text-xs text-[#D2D5DB]">{engine}</div>
</div>
<button
type="button"
onClick={removeConfirmation}
className="border-none text-white/40 hover:text-red-500"
>
<X size={24} />
</button>
</div>
</div>
);
}

View File

@@ -0,0 +1,271 @@
import { useState } from "react";
import { createPortal } from "react-dom";
import ModalWrapper from "@/components/ModalWrapper";
import { WarningOctagon, X } from "@phosphor-icons/react";
import { DB_LOGOS } from "./DBConnection";
function assembleConnectionString({
engine,
username = "",
password = "",
host = "",
port = "",
database = "",
}) {
if ([username, password, host, database].every((i) => !!i) === false)
return `Please fill out all the fields above.`;
switch (engine) {
case "postgresql":
return `postgres://${username}:${password}@${host}:${port}/${database}`;
case "mysql":
return `mysql://${username}:${password}@${host}:${port}/${database}`;
case "sql-server":
return `mssql://${username}:${password}@${host}:${port}/${database}`;
default:
return null;
}
}
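
// For illustration only (hypothetical values, not part of the component):
//   assembleConnectionString({ engine: "postgresql", username: "admin",
//     password: "secret", host: "localhost", port: "5432", database: "mydb" })
//   => "postgres://admin:secret@localhost:5432/mydb"
// If any of username/password/host/database is empty, the fill-out prompt
// string is returned instead so the live preview below the form stays helpful.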
const DEFAULT_ENGINE = "postgresql";
const DEFAULT_CONFIG = {
username: null,
password: null,
host: null,
port: null,
database: null,
};
export default function NewSQLConnection({ isOpen, closeModal, onSubmit }) {
const [engine, setEngine] = useState(DEFAULT_ENGINE);
const [config, setConfig] = useState(DEFAULT_CONFIG);
if (!isOpen) return null;
function handleClose() {
setEngine(DEFAULT_ENGINE);
setConfig(DEFAULT_CONFIG);
closeModal();
}
function onFormChange() {
const form = new FormData(document.getElementById("sql-connection-form"));
setConfig({
username: form.get("username").trim(),
password: form.get("password"),
host: form.get("host").trim(),
port: form.get("port").trim(),
database: form.get("database").trim(),
});
}
async function handleUpdate(e) {
e.preventDefault();
e.stopPropagation();
const form = new FormData(e.target);
onSubmit({
engine,
database_id: form.get("name"),
connectionString: assembleConnectionString({ engine, ...config }),
});
handleClose();
return false;
}
  // Nested forms cause all sorts of issues, so we portal this modal out to the
  // parent container instead of rendering it inside the settings form.
return createPortal(
<ModalWrapper isOpen={isOpen}>
<div className="relative w-1/3 max-h-full ">
<div className="relative bg-main-gradient rounded-xl shadow-[0_4px_14px_rgba(0,0,0,0.25)] max-h-[90vh] overflow-y-scroll no-scroll">
<div className="flex items-start justify-between p-4 border-b rounded-t border-gray-500/50">
<h3 className="text-xl font-semibold text-white">
New SQL Connection
</h3>
<button
onClick={handleClose}
type="button"
className="border-none transition-all duration-300 text-gray-400 bg-transparent hover:border-white/60 rounded-lg text-sm p-1.5 ml-auto inline-flex items-center bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
data-modal-hide="staticModal"
>
<X className="text-gray-300 text-lg" />
</button>
</div>
<form
id="sql-connection-form"
onSubmit={handleUpdate}
onChange={onFormChange}
>
<div className="py-[17px] px-[20px] flex flex-col gap-y-6">
<p className="text-sm text-white">
Add the connection information for your database below and it
will be available for future SQL agent calls.
</p>
<div className="flex flex-col w-full">
<div className="border border-red-800 bg-zinc-800 p-4 rounded-lg flex items-center gap-x-2 text-sm text-red-400">
<WarningOctagon size={28} className="shrink-0" />
<p>
<b>WARNING:</b> The SQL agent has been <i>instructed</i> to
only perform non-modifying queries. This <b>does not</b>{" "}
prevent a hallucination from still deleting data. Only
connect with a user who has <b>READ_ONLY</b> permissions.
</p>
</div>
<label className="text-white text-sm font-semibold block my-4">
Select your SQL engine
</label>
<div className="flex w-full flex-wrap gap-x-4">
<DBEngine
provider="postgresql"
active={engine === "postgresql"}
onClick={() => setEngine("postgresql")}
/>
<DBEngine
provider="mysql"
active={engine === "mysql"}
onClick={() => setEngine("mysql")}
/>
<DBEngine
provider="sql-server"
active={engine === "sql-server"}
onClick={() => setEngine("sql-server")}
/>
</div>
</div>
<div className="flex flex-col w-full">
<label className="text-white text-sm font-semibold block mb-4">
Connection name
</label>
<input
type="text"
name="name"
className="border-none bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="a unique name to identify this SQL connection"
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
<div className="flex gap-x-2">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Database user
</label>
<input
type="text"
name="username"
className="border-none bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="root"
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Database user password
</label>
<input
type="text"
name="password"
className="border-none bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="password123"
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
</div>
<div className="flex gap-x-2">
<div className="flex flex-col w-full">
<label className="text-white text-sm font-semibold block mb-4">
Server endpoint
</label>
<input
type="text"
name="host"
className="border-none bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="the hostname or endpoint for your database"
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
<div className="flex flex-col w-30">
<label className="text-white text-sm font-semibold block mb-4">
Port
</label>
<input
type="text"
name="port"
className="border-none bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="3306"
required={false}
autoComplete="off"
spellCheck={false}
/>
</div>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Database
</label>
<input
type="text"
name="database"
className="border-none bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="the database the agent will interact with"
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
<p className="text-white/40 text-sm">
{assembleConnectionString({ engine, ...config })}
</p>
</div>
<div className="flex w-full justify-between items-center p-3 space-x-2 border-t rounded-b border-gray-500/50">
<button
type="button"
onClick={handleClose}
className="border-none text-xs px-2 py-1 font-semibold rounded-lg bg-white hover:bg-transparent border-2 border-transparent hover:border-white hover:text-white h-[32px] w-fit -mr-8 whitespace-nowrap shadow-[0_4px_14px_rgba(0,0,0,0.25)]"
>
Cancel
</button>
<button
type="submit"
form="sql-connection-form"
className="border-none text-xs px-2 py-1 font-semibold rounded-lg bg-[#46C8FF] hover:bg-[#2C2F36] border-2 border-transparent hover:border-[#46C8FF] hover:text-white h-[32px] w-fit -mr-8 whitespace-nowrap shadow-[0_4px_14px_rgba(0,0,0,0.25)]"
>
Save connection
</button>
</div>
</form>
</div>
</div>
</ModalWrapper>,
document.getElementById("workspace-agent-settings-container")
);
}
function DBEngine({ provider, active, onClick }) {
return (
<button
type="button"
onClick={onClick}
className={`flex flex-col p-4 border border-white/40 bg-zinc-800 rounded-lg w-fit hover:bg-zinc-700 ${
active ? "!bg-blue-500/50" : ""
}`}
>
<img
src={DB_LOGOS[provider]}
className="h-[100px] rounded-md"
alt="PostgreSQL"
/>
</button>
);
}

(Three new binary images added: 38 KiB, 13 KiB, and 46 KiB; presumably the postgresql, mysql, and mssql icons imported by DBConnection above.)

@@ -0,0 +1,109 @@
import React, { useState } from "react";
import DBConnection from "./DBConnection";
import { Plus } from "@phosphor-icons/react";
import NewSQLConnection from "./NewConnectionModal";
import { useModal } from "@/hooks/useModal";
export default function AgentSQLConnectorSelection({
skill,
settings,
toggleSkill,
enabled = false,
}) {
const { isOpen, openModal, closeModal } = useModal();
const [connections, setConnections] = useState(
settings?.preferences?.agent_sql_connections || []
);
return (
<>
<div className="border-b border-white/40 pb-4">
<div className="flex flex-col">
<div className="flex w-full justify-between items-center">
<label htmlFor="name" className="block input-label">
SQL Agent
</label>
<label className="border-none relative inline-flex cursor-pointer items-center mt-2">
<input
type="checkbox"
className="peer sr-only"
checked={enabled}
onClick={() => toggleSkill(skill)}
/>
<div className="pointer-events-none peer h-6 w-11 rounded-full bg-stone-400 after:absolute after:left-[2px] after:top-[2px] after:h-5 after:w-5 after:rounded-full after:shadow-xl after:border after:border-gray-600 after:bg-white after:box-shadow-md after:transition-all after:content-[''] peer-checked:bg-lime-300 peer-checked:after:translate-x-full peer-checked:after:border-white peer-focus:outline-none peer-focus:ring-4 peer-focus:ring-blue-800"></div>
<span className="ml-3 text-sm font-medium text-gray-900 dark:text-gray-300"></span>
</label>
</div>
<p className="text-white text-opacity-60 text-xs font-medium py-1.5">
              Enable your agent to leverage SQL to answer your questions by
              connecting to various SQL database providers.
</p>
</div>
{enabled && (
<>
<input
name="system::agent_sql_connections"
type="hidden"
value={JSON.stringify(connections)}
/>
<input
type="hidden"
value={JSON.stringify(
connections.filter((conn) => conn.action !== "remove")
)}
/>
<div className="flex flex-col mt-2 gap-y-2">
<p className="text-white font-semibold text-sm">
Your database connections
</p>
<div className="flex flex-col gap-y-3">
{connections
.filter((connection) => connection.action !== "remove")
.map((connection) => (
<DBConnection
key={connection.database_id}
connection={connection}
onRemove={(databaseId) => {
setConnections((prev) =>
prev.map((conn) => {
if (conn.database_id === databaseId)
return { ...conn, action: "remove" };
return conn;
})
);
}}
/>
))}
<button
type="button"
onClick={openModal}
className="w-fit relative flex h-[40px] items-center border-none hover:bg-slate-600/20 rounded-lg"
>
<div className="flex w-full gap-x-2 items-center p-4">
<div className="bg-zinc-600 p-2 rounded-lg h-[24px] w-[24px] flex items-center justify-center">
<Plus
weight="bold"
size={14}
className="shrink-0 text-slate-100"
/>
</div>
<p className="text-left text-slate-100 text-sm">
New SQL connection
</p>
</div>
</button>
</div>
</div>
</>
)}
</div>
<NewSQLConnection
isOpen={isOpen}
closeModal={closeModal}
onSubmit={(newDb) =>
setConnections((prev) => [...prev, { action: "add", ...newDb }])
}
/>
</>
);
}

@@ -5,6 +5,7 @@ import { castToType } from "@/utils/types";
import { useEffect, useRef, useState } from "react";
import AgentLLMSelection from "./AgentLLMSelection";
import AgentWebSearchSelection from "./WebSearchSelection";
+import AgentSQLConnectorSelection from "./SQLConnectorSelection";
import GenericSkill from "./GenericSkill";
import Admin from "@/models/admin";
import * as Skeleton from "react-loading-skeleton";
@@ -205,6 +206,12 @@ function AvailableAgentSkills({ skills, settings, toggleAgentSkill }) {
          toggleSkill={toggleAgentSkill}
          enabled={skills.includes("web-browsing")}
        />
+        <AgentSQLConnectorSelection
+          skill="sql-agent"
+          settings={settings}
+          toggleSkill={toggleAgentSkill}
+          enabled={skills.includes("sql-agent")}
+        />
      </div>
    </div>
  );

@@ -20,19 +20,23 @@ export default function ChatTemperatureSettings({
          LLM Temperature
        </label>
        <p className="text-white text-opacity-60 text-xs font-medium py-1.5">
-          This setting controls how &quot;random&quot; or dynamic your chat
-          responses will be.
+          This setting controls how &quot;creative&quot; your LLM responses will
+          be.
          <br />
-          The higher the number (1.0 maximum) the more random and incoherent.
+          The higher the number the more creative. For some models this can lead
+          to incoherent responses when set too high.
          <br />
-          <i>Recommended: {defaults.temp}</i>
+          <br />
+          <i>
+            Most LLMs have various acceptable ranges of valid values. Consult
+            your LLM provider for that information.
+          </i>
        </p>
      </div>
      <input
        name="openAiTemp"
        type="number"
        min={0.0}
-        max={1.0}
        step={0.1}
        onWheel={(e) => e.target.blur()}
        defaultValue={workspace?.openAiTemp ?? defaults.temp}

@@ -2,7 +2,6 @@ import Workspace from "@/models/workspace";
import { castToType } from "@/utils/types";
import showToast from "@/utils/toast";
import { useEffect, useRef, useState } from "react";
-import VectorCount from "./VectorCount";
import WorkspaceName from "./WorkspaceName";
import SuggestedChatMessages from "./SuggestedChatMessages";
import DeleteWorkspace from "./DeleteWorkspace";
@@ -51,7 +50,6 @@ export default function GeneralInfo({ slug }) {
        onSubmit={handleUpdate}
        className="w-1/2 flex flex-col gap-y-6"
      >
-        <VectorCount reload={true} workspace={workspace} />
        <WorkspaceName
          key={workspace.slug}
          workspace={workspace}

@@ -28,9 +28,6 @@ export default function VectorCount({ reload, workspace }) {
  return (
    <div>
      <h3 className="input-label">Number of vectors</h3>
-      <p className="text-white text-opacity-60 text-xs font-medium py-1">
-        Total number of vectors in your vector database.
-      </p>
      <p className="text-white text-opacity-60 text-sm font-medium">
        {totalVectors}
      </p>

@@ -6,6 +6,7 @@ import VectorDBIdentifier from "./VectorDBIdentifier";
import MaxContextSnippets from "./MaxContextSnippets";
import DocumentSimilarityThreshold from "./DocumentSimilarityThreshold";
import ResetDatabase from "./ResetDatabase";
+import VectorCount from "./VectorCount";

export default function VectorDatabase({ workspace }) {
  const [hasChanges, setHasChanges] = useState(false);
@@ -38,7 +39,10 @@ export default function VectorDatabase({ workspace }) {
        onSubmit={handleUpdate}
        className="w-1/2 flex flex-col gap-y-6"
      >
+        <div className="flex items-start gap-x-5">
          <VectorDBIdentifier workspace={workspace} />
+          <VectorCount reload={true} workspace={workspace} />
+        </div>
        <MaxContextSnippets workspace={workspace} setHasChanges={setHasChanges} />
        <DocumentSimilarityThreshold
          workspace={workspace}

@@ -79,6 +79,12 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
# GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096
# GENERIC_OPEN_AI_API_KEY=sk-123abc

+# LLM_PROVIDER='litellm'
+# LITE_LLM_MODEL_PREF='gpt-3.5-turbo'
+# LITE_LLM_MODEL_TOKEN_LIMIT=4096
+# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
+# LITE_LLM_API_KEY='sk-123abc'

# LLM_PROVIDER='cohere'
# COHERE_API_KEY=
# COHERE_MODEL_PREF='command-r'
@@ -115,6 +121,10 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
# COHERE_API_KEY=
# EMBEDDING_MODEL_PREF='embed-english-v3.0'

+# EMBEDDING_ENGINE='voyageai'
+# VOYAGEAI_API_KEY=
+# EMBEDDING_MODEL_PREF='voyage-large-2-instruct'

###########################################
######## Vector Database Selection ########
###########################################

server/.gitignore
@@ -19,3 +19,6 @@ public/
documents
vector-cache
yarn-error.log
+
+# Local SSL Certs for HTTPS
+sslcert

@@ -350,6 +350,8 @@ function adminEndpoints(app) {
        agent_search_provider:
          (await SystemSettings.get({ label: "agent_search_provider" }))
            ?.value || null,
+        agent_sql_connections:
+          await SystemSettings.brief.agent_sql_connections(),
        default_agent_skills:
          safeJsonParse(
            (await SystemSettings.get({ label: "default_agent_skills" }))

@@ -447,6 +447,76 @@ function apiWorkspaceEndpoints(app) {
    }
  );
app.post(
"/v1/workspace/:slug/update-pin",
[validApiKey],
async (request, response) => {
/*
#swagger.tags = ['Workspaces']
#swagger.description = 'Add or remove pin from a document in a workspace by its unique slug.'
#swagger.path = '/workspace/{slug}/update-pin'
#swagger.parameters['slug'] = {
in: 'path',
description: 'Unique slug of workspace to find',
required: true,
type: 'string'
}
#swagger.requestBody = {
description: 'JSON object with the document path and pin status to update.',
required: true,
type: 'object',
content: {
"application/json": {
example: {
docPath: "custom-documents/my-pdf.pdf-hash.json",
pinStatus: true
}
}
}
}
#swagger.responses[200] = {
description: 'OK',
content: {
"application/json": {
schema: {
type: 'object',
example: {
message: 'Pin status updated successfully'
}
}
}
}
}
#swagger.responses[404] = {
description: 'Document not found'
}
#swagger.responses[500] = {
description: 'Internal Server Error'
}
*/
try {
const { slug = null } = request.params;
const { docPath, pinStatus = false } = reqBody(request);
const workspace = await Workspace.get({ slug });
const document = await Document.get({
workspaceId: workspace.id,
docpath: docPath,
});
if (!document) return response.sendStatus(404).end();
await Document.update(document.id, { pinned: pinStatus });
return response
.status(200)
.json({ message: "Pin status updated successfully" })
.end();
} catch (error) {
console.error("Error processing the pin status update:", error);
return response.status(500).end();
}
}
);
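
  // Usage sketch for the endpoint above (hypothetical host, API key, and
  // document path; the apiRouter is mounted at /api in server/index.js):
  //
  //   await fetch("http://localhost:3001/api/v1/workspace/my-workspace/update-pin", {
  //     method: "POST",
  //     headers: {
  //       Authorization: "Bearer MY-ANYTHINGLLM-API-KEY",
  //       "Content-Type": "application/json",
  //     },
  //     body: JSON.stringify({
  //       docPath: "custom-documents/my-pdf.pdf-hash.json",
  //       pinStatus: true,
  //     }),
  //   });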
  app.post(
    "/v1/workspace/:slug/chat",
    [validApiKey],
@@ -533,6 +603,7 @@ function apiWorkspaceEndpoints(app) {
        });
        response.status(200).json({ ...result });
      } catch (e) {
+        console.log(e.message, e);
        response.status(500).json({
          id: uuidv4(),
          type: "abort",
@@ -655,7 +726,7 @@ function apiWorkspaceEndpoints(app) {
        });
        response.end();
      } catch (e) {
-        console.error(e);
+        console.log(e.message, e);
        writeResponseChunk(response, {
          id: uuidv4(),
          type: "abort",

@@ -40,7 +40,12 @@ app.use(
  })
);

-require("express-ws")(app);
+if (!!process.env.ENABLE_HTTPS) {
+  bootSSL(app, process.env.SERVER_PORT || 3001);
+} else {
+  require("express-ws")(app); // load WebSockets in non-SSL mode.
+}

app.use("/api", apiRouter);
systemEndpoints(apiRouter);
extensionEndpoints(apiRouter);
@@ -113,8 +118,6 @@ app.all("*", function (_, response) {
  response.sendStatus(404);
});

-if (!!process.env.ENABLE_HTTPS) {
-  bootSSL(app, process.env.SERVER_PORT || 3001);
-} else {
-  bootHTTP(app, process.env.SERVER_PORT || 3001);
-}
+// In non-https mode we need to boot at the end since the server has not yet
+// started and is `.listen`ing.
+if (!process.env.ENABLE_HTTPS) bootHTTP(app, process.env.SERVER_PORT || 3001);

@@ -1,6 +1,9 @@
const path = require("path");
const prisma = require("../utils/prisma");
-const { isValidUrl } = require("../utils/http");
+const { isValidUrl, safeJsonParse } = require("../utils/http");
+const { default: slugify } = require("slugify");
+const { v4 } = require("uuid");

process.env.NODE_ENV === "development"
  ? require("dotenv").config({ path: `.env.${process.env.NODE_ENV}` })
  : require("dotenv").config({
@@ -28,6 +31,7 @@ const SystemSettings = {
    "text_splitter_chunk_overlap",
    "agent_search_provider",
    "default_agent_skills",
+    "agent_sql_connections",
  ],
  validations: {
    footer_data: (updates) => {
@@ -69,6 +73,7 @@ const SystemSettings = {
    },
    agent_search_provider: (update) => {
      try {
+        if (update === "none") return null;
        if (!["google-search-engine", "serper-dot-dev"].includes(update))
          throw new Error("Invalid SERP provider.");
        return String(update);
@@ -89,6 +94,22 @@ const SystemSettings = {
        return JSON.stringify([]);
      }
    },
+    agent_sql_connections: async (updates) => {
+      const existingConnections = safeJsonParse(
+        (await SystemSettings.get({ label: "agent_sql_connections" }))?.value,
+        []
+      );
+      try {
+        const updatedConnections = mergeConnections(
+          existingConnections,
+          safeJsonParse(updates, [])
+        );
+        return JSON.stringify(updatedConnections);
+      } catch (e) {
+        console.error(`Failed to merge connections`);
+        return JSON.stringify(existingConnections ?? []);
+      }
+    },
  },
  currentSettings: async function () {
    const { hasVectorCachedFiles } = require("../utils/files");
@@ -134,6 +155,8 @@ const SystemSettings = {
    // - then it can be shared.
    // --------------------------------------------------------
    WhisperProvider: process.env.WHISPER_PROVIDER || "local",
+    WhisperModelPref:
+      process.env.WHISPER_MODEL_PREF || "Xenova/whisper-small",

    // --------------------------------------------------------
    // TTS/STT Selection Settings & Configs
@@ -208,12 +231,19 @@ const SystemSettings = {
  // that takes no user input for the keys being modified.
  _updateSettings: async function (updates = {}) {
    try {
-      const updatePromises = Object.keys(updates).map((key) => {
-        const validatedValue = this.validations.hasOwnProperty(key)
-          ? this.validations[key](updates[key])
-          : updates[key];
+      const updatePromises = [];
+      for (const key of Object.keys(updates)) {
+        let validatedValue = updates[key];
+        if (this.validations.hasOwnProperty(key)) {
+          if (this.validations[key].constructor.name === "AsyncFunction") {
+            validatedValue = await this.validations[key](updates[key]);
+          } else {
+            validatedValue = this.validations[key](updates[key]);
+          }
+        }

-        return prisma.system_settings.upsert({
+        updatePromises.push(
+          prisma.system_settings.upsert({
            where: { label: key },
            update: {
              value: validatedValue === null ? null : String(validatedValue),
@@ -222,8 +252,9 @@ const SystemSettings = {
              label: key,
              value: validatedValue === null ? null : String(validatedValue),
            },
-        });
-      });
+          })
+        );
+      }

      await Promise.all(updatePromises);
      return { success: true, error: null };
@@ -328,6 +359,8 @@ const SystemSettings = {
      // Gemini Keys
      GeminiLLMApiKey: !!process.env.GEMINI_API_KEY,
      GeminiLLMModelPref: process.env.GEMINI_LLM_MODEL_PREF || "gemini-pro",
+      GeminiSafetySetting:
+        process.env.GEMINI_SAFETY_SETTING || "BLOCK_MEDIUM_AND_ABOVE",

      // LMStudio Keys
      LMStudioBasePath: process.env.LMSTUDIO_BASE_PATH,
@@ -384,6 +417,12 @@ const SystemSettings = {
      TextGenWebUITokenLimit: process.env.TEXT_GEN_WEB_UI_MODEL_TOKEN_LIMIT,
      TextGenWebUIAPIKey: !!process.env.TEXT_GEN_WEB_UI_API_KEY,

+      // LiteLLM Keys
+      LiteLLMModelPref: process.env.LITE_LLM_MODEL_PREF,
+      LiteLLMTokenLimit: process.env.LITE_LLM_MODEL_TOKEN_LIMIT,
+      LiteLLMBasePath: process.env.LITE_LLM_BASE_PATH,
+      LiteLLMApiKey: !!process.env.LITE_LLM_API_KEY,

      // Generic OpenAI Keys
      GenericOpenAiBasePath: process.env.GENERIC_OPEN_AI_BASE_PATH,
      GenericOpenAiModelPref: process.env.GENERIC_OPEN_AI_MODEL_PREF,
@@ -394,8 +433,63 @@ const SystemSettings = {
      // Cohere API Keys
      CohereApiKey: !!process.env.COHERE_API_KEY,
      CohereModelPref: process.env.COHERE_MODEL_PREF,
+
+      // VoyageAi API Keys
+      VoyageAiApiKey: !!process.env.VOYAGEAI_API_KEY,
    };
  },

+  // For special retrieval of a key setting that does not expose any credential information
+  brief: {
+    agent_sql_connections: async function () {
+      const setting = await SystemSettings.get({
+        label: "agent_sql_connections",
+      });
+      if (!setting) return [];
+      return safeJsonParse(setting.value, []).map((dbConfig) => {
+        const { connectionString, ...rest } = dbConfig;
+        return rest;
+      });
+    },
+  },
};
function mergeConnections(existingConnections = [], updates = []) {
let updatedConnections = [...existingConnections];
const existingDbIds = existingConnections.map((conn) => conn.database_id);
// First remove all 'action:remove' candidates from existing connections.
const toRemove = updates
.filter((conn) => conn.action === "remove")
.map((conn) => conn.database_id);
updatedConnections = updatedConnections.filter(
(conn) => !toRemove.includes(conn.database_id)
);
// Next add all 'action:add' candidates into the updatedConnections; We DO NOT validate the connection strings.
// but we do validate their database_id is unique.
updates
.filter((conn) => conn.action === "add")
.forEach((update) => {
if (!update.connectionString) return; // invalid connection string
// Remap name to be unique to entire set.
if (existingDbIds.includes(update.database_id)) {
update.database_id = slugify(
`${update.database_id}-${v4().slice(0, 4)}`
);
} else {
update.database_id = slugify(update.database_id);
}
updatedConnections.push({
engine: update.engine,
database_id: update.database_id,
connectionString: update.connectionString,
});
});
return updatedConnections;
}
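
// For illustration only (hypothetical values): given one saved connection,
//   mergeConnections(
//     [{ engine: "postgresql", database_id: "prod-db", connectionString: "postgres://…" }],
//     [
//       { action: "remove", database_id: "prod-db" },
//       { action: "add", engine: "mysql", database_id: "prod-db", connectionString: "mysql://…" },
//     ]
//   )
// drops the existing "prod-db" entry and then adds the new one. Since
// existingDbIds was built from the original list and still contains "prod-db",
// the added entry is re-slugged to a unique id such as "prod-db-1a2b" instead
// of silently colliding.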
module.exports.SystemSettings = SystemSettings;

@@ -58,11 +58,14 @@
    "langchain": "0.1.36",
    "mime": "^3.0.0",
    "moment": "^2.29.4",
+    "mssql": "^10.0.2",
    "multer": "^1.4.5-lts.1",
+    "mysql2": "^3.9.7",
    "node-html-markdown": "^1.3.0",
    "node-llama-cpp": "^2.8.0",
    "ollama": "^0.5.0",
    "openai": "4.38.5",
+    "pg": "^8.11.5",
    "pinecone-client": "^1.1.0",
    "pluralize": "^8.0.0",
    "posthog-node": "^3.1.1",
@@ -72,6 +75,7 @@
    "sqlite3": "^5.1.6",
    "swagger-autogen": "^2.23.5",
    "swagger-ui-express": "^5.0.0",
+    "url-pattern": "^1.0.3",
    "uuid": "^9.0.0",
    "uuid-apikey": "^1.5.3",
    "vectordb": "0.4.11",

@@ -2000,6 +2000,69 @@
          }
        }
      },
"/workspace/{slug}/update-pin": {
"post": {
"tags": [
"Workspaces"
],
"description": "Add or remove pin from a document in a workspace by its unique slug.",
"parameters": [
{
"name": "slug",
"in": "path",
"required": true,
"schema": {
"type": "string"
},
"description": "Unique slug of workspace to find"
},
{
"name": "Authorization",
"in": "header",
"schema": {
"type": "string"
}
}
],
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"type": "object",
"example": {
"message": "Pin status updated successfully"
}
}
}
}
},
"403": {
"description": "Forbidden"
},
"404": {
"description": "Document not found"
},
"500": {
"description": "Internal Server Error"
}
},
"requestBody": {
"description": "JSON object with the document path and pin status to update.",
"required": true,
"type": "object",
"content": {
"application/json": {
"example": {
"docPath": "custom-documents/my-pdf.pdf-hash.json",
"pinStatus": true
}
}
}
}
}
},
"/v1/workspace/{slug}/chat": { "/v1/workspace/{slug}/chat": {
"post": { "post": {
"tags": [ "tags": [

@@ -3,6 +3,7 @@ const {
  writeResponseChunk,
  clientAbortedHandler,
} = require("../../helpers/chat/responses");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");

class AnthropicLLM {
  constructor(embedder = null, modelPreference = null) {
@@ -23,11 +24,7 @@ class AnthropicLLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    if (!embedder)
-      throw new Error(
-        "INVALID ANTHROPIC SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Anthropic as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
  }

@@ -1,4 +1,4 @@
-const { AzureOpenAiEmbedder } = require("../../EmbeddingEngines/azureOpenAi");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  writeResponseChunk,
  clientAbortedHandler,
@@ -23,11 +23,7 @@ class AzureOpenAiLLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for AzureOpenAiLLM - falling back to AzureOpenAiEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new AzureOpenAiEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
  }

@@ -19,7 +19,8 @@ class CohereLLM {
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
-    this.embedder = !!embedder ? embedder : new NativeEmbedder();
+
+    this.embedder = embedder ?? new NativeEmbedder();
  }

  #appendContext(contextTexts = []) {

@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  writeResponseChunk,
  clientAbortedHandler,
@@ -26,12 +27,9 @@ class GeminiLLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    if (!embedder)
-      throw new Error(
-        "INVALID GEMINI LLM SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Gemini as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7; // not used for Gemini
+    this.safetyThreshold = this.#fetchSafetyThreshold();
  }

  #appendContext(contextTexts = []) {
@@ -46,6 +44,41 @@ class GeminiLLM {
    );
  }
// BLOCK_NONE can be a special candidate for some fields
// https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-attributes#how_to_remove_automated_response_blocking_for_select_safety_attributes
// so if you are wondering why BLOCK_NONE still failed, the link above will explain why.
#fetchSafetyThreshold() {
const threshold =
process.env.GEMINI_SAFETY_SETTING ?? "BLOCK_MEDIUM_AND_ABOVE";
const safetyThresholds = [
"BLOCK_NONE",
"BLOCK_ONLY_HIGH",
"BLOCK_MEDIUM_AND_ABOVE",
"BLOCK_LOW_AND_ABOVE",
];
return safetyThresholds.includes(threshold)
? threshold
: "BLOCK_MEDIUM_AND_ABOVE";
}
#safetySettings() {
return [
{
category: "HARM_CATEGORY_HATE_SPEECH",
threshold: this.safetyThreshold,
},
{
category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
threshold: this.safetyThreshold,
},
{ category: "HARM_CATEGORY_HARASSMENT", threshold: this.safetyThreshold },
{
category: "HARM_CATEGORY_DANGEROUS_CONTENT",
threshold: this.safetyThreshold,
},
];
}
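
  // For example, setting GEMINI_SAFETY_SETTING=BLOCK_ONLY_HIGH in the server
  // .env applies that threshold to all four harm categories above; any
  // unrecognized value falls back to "BLOCK_MEDIUM_AND_ABOVE" via
  // #fetchSafetyThreshold().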
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }
@@ -146,6 +179,7 @@ class GeminiLLM {
    )?.content;
    const chatThread = this.gemini.startChat({
      history: this.formatMessages(messages),
+      safetySettings: this.#safetySettings(),
    });
    const result = await chatThread.sendMessage(prompt);
    const response = result.response;
@@ -167,6 +201,7 @@ class GeminiLLM {
    )?.content;
    const chatThread = this.gemini.startChat({
      history: this.formatMessages(messages),
+      safetySettings: this.#safetySettings(),
    });
    const responseStream = await chatThread.sendMessageStream(prompt);
    if (!responseStream.stream)

@@ -2,6 +2,7 @@ const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
+const { toValidNumber } = require("../../http");

class GenericOpenAiLLM {
  constructor(embedder = null, modelPreference = null) {
@@ -18,7 +19,9 @@ class GenericOpenAiLLM {
    });
    this.model =
      modelPreference ?? process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
-    this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024;
+    this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS
+      ? toValidNumber(process.env.GENERIC_OPEN_AI_MAX_TOKENS, 1024)
+      : 1024;
    if (!this.model)
      throw new Error("GenericOpenAI must have a valid model set.");
    this.limits = {
@@ -27,11 +30,7 @@ class GenericOpenAiLLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for GenericOpenAiLLM - falling back to NativeEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
  }

@@ -20,7 +20,7 @@ class GroqLLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
  }

@@ -1,5 +1,4 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
-const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
const {
  handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
@@ -26,11 +25,7 @@ class HuggingFaceLLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for HuggingFaceLLM - falling back to Native for embedding!"
-      );
-    this.embedder = !embedder ? new OpenAiEmbedder() : new NativeEmbedder();
+    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.2;
  }

@@ -26,11 +26,7 @@ class KoboldCPPLLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for KoboldCPPLLM - falling back to NativeEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
  }

@@ -0,0 +1,174 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { v4: uuidv4 } = require("uuid"); // used as the fallback uuid in handleStream
const {
  writeResponseChunk,
  clientAbortedHandler,
} = require("../../helpers/chat/responses");
class LiteLLM {
constructor(embedder = null, modelPreference = null) {
const { OpenAI: OpenAIApi } = require("openai");
if (!process.env.LITE_LLM_BASE_PATH)
throw new Error(
"LiteLLM must have a valid base path to use for the api."
);
this.basePath = process.env.LITE_LLM_BASE_PATH;
this.openai = new OpenAIApi({
baseURL: this.basePath,
apiKey: process.env.LITE_LLM_API_KEY ?? null,
});
this.model = modelPreference ?? process.env.LITE_LLM_MODEL_PREF ?? null;
this.maxTokens = process.env.LITE_LLM_MODEL_TOKEN_LIMIT ?? 1024;
if (!this.model) throw new Error("LiteLLM must have a valid model set.");
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
this.embedder = embedder ?? new NativeEmbedder();
this.defaultTemp = 0.7;
this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
}
log(text, ...args) {
console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
// Ensure the user set a value for the token limit
// and if undefined - assume 4096 window.
promptWindowLimit() {
const limit = process.env.LITE_LLM_MODEL_TOKEN_LIMIT || 4096;
if (!limit || isNaN(Number(limit)))
throw new Error("No token context limit was set.");
return Number(limit);
}
// Short circuit since we have no idea if the model is valid or not
// in pre-flight for generic endpoints
isValidChatCompletionModel(_modelName = "") {
return true;
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
}
async isSafe(_input = "") {
// Not implemented so must be stubbed
return { safe: true, reasons: [] };
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
const result = await this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
max_tokens: parseInt(this.maxTokens), // LiteLLM requires int
})
.catch((e) => {
throw new Error(e.response.data.error.message);
});
if (!result.hasOwnProperty("choices") || result.choices.length === 0)
return null;
return result.choices[0].message.content;
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
const streamRequest = await this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
max_tokens: parseInt(this.maxTokens), // LiteLLM requires int
});
return streamRequest;
}
handleStream(response, stream, responseProps) {
const { uuid = uuidv4(), sources = [] } = responseProps;
return new Promise(async (resolve) => {
let fullText = "";
const handleAbort = () => clientAbortedHandler(resolve, fullText);
response.on("close", handleAbort);
for await (const chunk of stream) {
const message = chunk?.choices?.[0];
const token = message?.delta?.content;
if (token) {
fullText += token;
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: token,
close: false,
error: false,
});
}
        // LiteLLM does not emit a finish reason until the final stream chunk
        if (message?.finish_reason) {
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
response.removeListener("close", handleAbort);
resolve(fullText);
}
}
});
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
module.exports = {
LiteLLM,
};
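
// Usage sketch (hypothetical require path; assumes the LITE_LLM_* variables
// from .env.example are set):
//
//   const { LiteLLM } = require("./LiteLLM");
//   const llm = new LiteLLM();
//   const messages = llm.constructPrompt({
//     systemPrompt: "You are a helpful assistant.",
//     contextTexts: [],
//     chatHistory: [],
//     userPrompt: "Hello!",
//   });
//   llm.getChatCompletion(messages, { temperature: 0.7 }).then(console.log);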

@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
@@ -27,11 +28,7 @@ class LMStudioLLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    if (!embedder)
-      throw new Error(
-        "INVALID LM STUDIO SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LMStudio as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
  }

@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
@@ -19,11 +20,7 @@ class LocalAiLLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    if (!embedder)
-      throw new Error(
-        "INVALID LOCAL AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LocalAI as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
  }

@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
@@ -20,11 +21,7 @@ class MistralLLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for MistralLLM - falling back to OpenAiEmbedder for embedding!"
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.0;
  }

@@ -23,7 +23,7 @@ class NativeLLM {
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
-    this.embedder = embedder || new NativeEmbedder();
+    this.embedder = embedder ?? new NativeEmbedder();
    this.cacheDir = path.resolve(
      process.env.STORAGE_DIR
        ? path.resolve(process.env.STORAGE_DIR, "models", "downloaded")

@@ -3,6 +3,7 @@ const {
  writeResponseChunk,
  clientAbortedHandler,
} = require("../../helpers/chat/responses");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");

// Docs: https://github.com/jmorganca/ollama/blob/main/docs/api.md
class OllamaAILLM {
@@ -18,11 +19,7 @@ class OllamaAILLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    if (!embedder)
-      throw new Error(
-        "INVALID OLLAMA SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Ollama as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
  }

@@ -1,4 +1,4 @@
-const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
@@ -18,11 +18,7 @@ class OpenAiLLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for OpenAiLLM - falling back to OpenAiEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new OpenAiEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
  }

@@ -36,7 +36,7 @@ class OpenRouterLLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;

    if (!fs.existsSync(cacheFolder))

@@ -28,7 +28,7 @@ class PerplexityLLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
  }
} }

View File

@ -23,7 +23,7 @@ class TextGenWebUILLM {
user: this.promptWindowLimit() * 0.7, user: this.promptWindowLimit() * 0.7,
}; };
this.embedder = !embedder ? new NativeEmbedder() : embedder; this.embedder = embedder ?? new NativeEmbedder();
this.defaultTemp = 0.7; this.defaultTemp = 0.7;
this.log(`Inference API: ${this.basePath} Model: ${this.model}`); this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
} }

@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
@@ -23,11 +24,7 @@ class TogetherAiLLM {
      user: this.promptWindowLimit() * 0.7,
    };

-    if (!embedder)
-      throw new Error(
-        "INVALID TOGETHER AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Together AI as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = !embedder ? new NativeEmbedder() : embedder;
    this.defaultTemp = 0.7;
  }

@@ -0,0 +1,45 @@
class VoyageAiEmbedder {
constructor() {
if (!process.env.VOYAGEAI_API_KEY)
throw new Error("No Voyage AI API key was set.");
const {
VoyageEmbeddings,
} = require("@langchain/community/embeddings/voyage");
const voyage = new VoyageEmbeddings({
apiKey: process.env.VOYAGEAI_API_KEY,
});
this.voyage = voyage;
this.model = process.env.EMBEDDING_MODEL_PREF || "voyage-large-2-instruct";
    // Limit of how many strings we can process in a single pass to stay within resource or network limits
this.batchSize = 128; // Voyage AI's limit per request is 128 https://docs.voyageai.com/docs/rate-limits#use-larger-batches
this.embeddingMaxChunkLength = 4000; // https://docs.voyageai.com/docs/embeddings - assume a token is roughly 4 letters with some padding
}
async embedTextInput(textInput) {
const result = await this.voyage.embedDocuments(
Array.isArray(textInput) ? textInput : [textInput],
{ modelName: this.model }
);
return result || [];
}
async embedChunks(textChunks = []) {
try {
const embeddings = await this.voyage.embedDocuments(textChunks, {
modelName: this.model,
batchSize: this.batchSize,
});
return embeddings;
} catch (error) {
console.error("Voyage AI Failed to embed:", error);
throw error;
}
}
}
module.exports = {
VoyageAiEmbedder,
};
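
// Usage sketch (hypothetical require path; assumes VOYAGEAI_API_KEY is set):
//
//   const { VoyageAiEmbedder } = require("./voyageAi");
//   const embedder = new VoyageAiEmbedder();
//   embedder.embedChunks(["first chunk", "second chunk"]).then((vectors) => {
//     console.log(vectors.length); // one embedding vector per chunk
//   });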

@@ -17,6 +17,7 @@ class TextSplitter {
    Config: {
      chunkSize: number,
      chunkOverlap: number,
+      chunkHeaderMeta: object | null, // Gets appended to top of each chunk as metadata
    }
  ------
  */
@@ -44,6 +45,18 @@ class TextSplitter {
    return prefValue > limit ? limit : prefValue;
  }
stringifyHeader() {
if (!this.config.chunkHeaderMeta) return null;
let content = "";
    Object.entries(this.config.chunkHeaderMeta).forEach(([key, value]) => {
if (!key || !value) return;
content += `${key}: ${value}\n`;
});
if (!content) return null;
return `<document_metadata>\n${content}</document_metadata>\n\n`;
}
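
  // For illustration only (hypothetical metadata): with
  //   chunkHeaderMeta: { sourceDocument: "report.pdf", published: "2024-05-21" }
  // stringifyHeader() returns
  //   "<document_metadata>\nsourceDocument: report.pdf\npublished: 2024-05-21\n</document_metadata>\n\n"
  // which RecursiveSplitter prepends to every chunk via createDocuments() below.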
  #setSplitter(config = {}) {
    // if (!config?.splitByFilename) {// TODO do something when specific extension is present? }
    return new RecursiveSplitter({
@@ -51,6 +64,7 @@ class TextSplitter {
      chunkOverlap: isNaN(config?.chunkOverlap)
        ? 20
        : Number(config?.chunkOverlap),
+      chunkHeader: this.stringifyHeader(),
    });
  }
@@ -61,11 +75,12 @@ class TextSplitter {

// Wrapper for Langchain default RecursiveCharacterTextSplitter class.
class RecursiveSplitter {
-  constructor({ chunkSize, chunkOverlap }) {
+  constructor({ chunkSize, chunkOverlap, chunkHeader = null }) {
    const {
      RecursiveCharacterTextSplitter,
    } = require("@langchain/textsplitters");
    this.log(`Will split with`, { chunkSize, chunkOverlap });
+    this.chunkHeader = chunkHeader;
    this.engine = new RecursiveCharacterTextSplitter({
      chunkSize,
      chunkOverlap,
} }
async _splitText(documentText) { async _splitText(documentText) {
return this.engine.splitText(documentText); if (!this.chunkHeader) return this.engine.splitText(documentText);
const strings = await this.engine.splitText(documentText);
const documents = await this.engine.createDocuments(strings, [], {
chunkHeader: this.chunkHeader,
});
return documents
.filter((doc) => !!doc.pageContent)
.map((doc) => doc.pageContent);
} }
} }

@@ -498,6 +498,17 @@ Only return the role.
    return availableNodes[Math.floor(Math.random() * availableNodes.length)];
  }
/**
*
   * @param {string} pluginName the name of the plugin being called
* @returns string of the plugin to be called compensating for children denoted by # in the string.
* eg: sql-agent:list-database-connections
*/
#parseFunctionName(pluginName = "") {
if (!pluginName.includes("#")) return pluginName;
return pluginName.split("#")[1];
}
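
  // For illustration (assuming child skills are registered as "<parent>#<child>"):
  //   this.#parseFunctionName("sql-agent#sql-list-tables") === "sql-list-tables"
  //   this.#parseFunctionName("web-browsing") === "web-browsing"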
  /**
   * Check if the chat has reached the maximum number of rounds.
   */
@@ -550,7 +561,7 @@ ${this.getHistory({ to: route.to })

      // get the functions that the node can call
      const functions = fromConfig.functions
-        ?.map((name) => this.functions.get(name))
+        ?.map((name) => this.functions.get(this.#parseFunctionName(name)))
        .filter((a) => !!a);

      const provider = this.getProviderForConfig({

@@ -6,6 +6,7 @@ const { saveFileInBrowser } = require("./save-file-browser.js");
const { chatHistory } = require("./chat-history.js");
const { memory } = require("./memory.js");
const { rechart } = require("./rechart.js");
+const { sqlAgent } = require("./sql-agent/index.js");

module.exports = {
  webScraping,
@@ -16,6 +17,7 @@ module.exports = {
  chatHistory,
  memory,
  rechart,
+  sqlAgent,

  // Plugin name aliases so they can be pulled by slug as well.
  [webScraping.name]: webScraping,
@@ -26,4 +28,5 @@ module.exports = {
  [chatHistory.name]: chatHistory,
  [memory.name]: memory,
  [rechart.name]: rechart,
+  [sqlAgent.name]: sqlAgent,
};

@@ -0,0 +1,89 @@
const mssql = require("mssql");
const UrlPattern = require("url-pattern");
class MSSQLConnector {
#connected = false;
database_id = "";
connectionConfig = {
user: null,
password: null,
database: null,
server: null,
port: null,
pool: {
max: 10,
min: 0,
idleTimeoutMillis: 30000,
},
options: {
encrypt: false,
trustServerCertificate: true,
},
};
constructor(
config = {
      // We expect the connection string in RFC-3986 form,
      // eg: mssql://user:password@server:port/database?{...opts}
      connectionString: null,
}
) {
this.connectionString = config.connectionString;
this._client = null;
this.#parseDatabase();
}
#parseDatabase() {
const connectionPattern = new UrlPattern(
"mssql\\://:username\\::password@*\\::port/:database*"
);
const match = connectionPattern.match(this.connectionString);
this.database_id = match?.database;
this.connectionConfig = {
...this.connectionConfig,
user: match?.username,
password: match?.password,
database: match?.database,
server: match?._[0],
port: match?.port ? Number(match.port) : null,
};
}
async connect() {
this._client = await mssql.connect(this.connectionConfig);
this.#connected = true;
return this._client;
}
/**
*
* @param {string} queryString the SQL query to be run
* @returns {import(".").QueryResult}
*/
async runQuery(queryString = "") {
const result = { rows: [], count: 0, error: null };
try {
if (!this.#connected) await this.connect();
const query = await this._client.query(queryString);
result.rows = query.recordset;
result.count = query.rowsAffected.reduce((sum, a) => sum + a, 0);
} catch (err) {
console.log(this.constructor.name, err);
result.error = err.message;
} finally {
await this._client.close();
this.#connected = false;
}
return result;
}
getTablesSql() {
return `SELECT name FROM sysobjects WHERE xtype='U';`;
}
getTableSchemaSql(table_name) {
return `SELECT COLUMN_NAME,COLUMN_DEFAULT,IS_NULLABLE,DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME='${table_name}'`;
}
}
module.exports.MSSQLConnector = MSSQLConnector;
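
// For illustration only (hypothetical credentials): the UrlPattern above parses
//   "mssql://sa:secret@localhost:1433/master"
// into { user: "sa", password: "secret", server: "localhost", port: 1433,
// database: "master" } before the pool config is handed to mssql.connect().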

@@ -0,0 +1,59 @@
const mysql = require("mysql2/promise");
const UrlPattern = require("url-pattern");
class MySQLConnector {
#connected = false;
database_id = "";
constructor(
config = {
connectionString: null,
}
) {
this.connectionString = config.connectionString;
this._client = null;
this.database_id = this.#parseDatabase();
}
#parseDatabase() {
const connectionPattern = new UrlPattern("mysql\\://*@*/:database*");
const match = connectionPattern.match(this.connectionString);
return match?.database;
}
async connect() {
this._client = await mysql.createConnection({ uri: this.connectionString });
this.#connected = true;
return this._client;
}
/**
*
* @param {string} queryString the SQL query to be run
* @returns {import(".").QueryResult}
*/
async runQuery(queryString = "") {
const result = { rows: [], count: 0, error: null };
try {
if (!this.#connected) await this.connect();
const [query] = await this._client.query(queryString);
result.rows = query;
result.count = query?.length;
} catch (err) {
console.log(this.constructor.name, err);
result.error = err.message;
} finally {
await this._client.end();
this.#connected = false;
}
return result;
}
getTablesSql() {
return `SELECT table_name FROM information_schema.tables WHERE table_schema = '${this.database_id}'`;
}
getTableSchemaSql(table_name) {
return `SHOW COLUMNS FROM ${this.database_id}.${table_name};`;
}
}
module.exports.MySQLConnector = MySQLConnector;
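The MySQL connector follows the same open-query-close pattern; a short sketch (hypothetical URI) showing that `database_id` is parsed straight out of the connection string:

const { MySQLConnector } = require("./MySQL");

const db = new MySQLConnector({
  connectionString: "mysql://root:secret@localhost:3306/sales", // hypothetical
});
console.log(db.database_id); // "sales"
db.runQuery(db.getTablesSql()).then((result) => console.log(result));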

View File

@ -0,0 +1,52 @@
const pgSql = require("pg");
class PostgresSQLConnector {
#connected = false;
constructor(
config = {
connectionString: null,
}
) {
this.connectionString = config.connectionString;
this._client = new pgSql.Client({
connectionString: this.connectionString,
});
}
async connect() {
await this._client.connect();
this.#connected = true;
return this._client;
}
/**
*
* @param {string} queryString the SQL query to be run
* @returns {import(".").QueryResult}
*/
async runQuery(queryString = "") {
const result = { rows: [], count: 0, error: null };
try {
if (!this.#connected) await this.connect();
const query = await this._client.query(queryString);
result.rows = query.rows;
result.count = query.rowCount;
} catch (err) {
console.log(this.constructor.name, err);
result.error = err.message;
} finally {
await this._client.end();
this.#connected = false;
}
return result;
}
getTablesSql() {
return `SELECT * FROM pg_catalog.pg_tables WHERE schemaname = 'public'`;
}
getTableSchemaSql(table_name) {
return `SELECT column_name, data_type, character_maximum_length, column_default, is_nullable FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = '${table_name}'`;
}
}
module.exports.PostgresSQLConnector = PostgresSQLConnector;
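One caveat on the Postgres connector: the `pg.Client` is created once in the constructor, and `runQuery` calls `end()` in its `finally` block. A `pg.Client` cannot be reconnected after `end()`, so the safe pattern is a fresh connector instance per query, sketched here with a hypothetical connection string:

const { PostgresSQLConnector } = require("./Postgresql");

// One connector per query, since the underlying client is single-use.
const run = (sql) =>
  new PostgresSQLConnector({
    connectionString: "postgresql://user:pass@localhost:5432/sales",
  }).runQuery(sql);

run("SELECT 1 AS ok").then(console.log); // { rows: [ { ok: 1 } ], count: 1, error: null }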

View File

@ -0,0 +1,60 @@
const { SystemSettings } = require("../../../../../../models/systemSettings");
const { safeJsonParse } = require("../../../../../http");
/**
* @typedef {('postgresql'|'mysql'|'sql-server')} SQLEngine
*/
/**
* @typedef {Object} QueryResult
 * @property {object[]} rows - The query result rows
* @property {number} count - Number of rows the query returned/changed
* @property {string|null} error - Error string if there was an issue
*/
/**
* A valid database SQL connection object
* @typedef {Object} SQLConnection
* @property {string} database_id - Unique identifier of the database connection
* @property {SQLEngine} engine - Engine used by connection
* @property {string} connectionString - RFC connection string for db
*/
/**
* @param {SQLEngine} identifier
* @param {object} connectionConfig
* @returns Database Connection Engine Class for SQLAgent or throws error
*/
function getDBClient(identifier = "", connectionConfig = {}) {
switch (identifier) {
case "mysql":
const { MySQLConnector } = require("./MySQL");
return new MySQLConnector(connectionConfig);
case "postgresql":
const { PostgresSQLConnector } = require("./Postgresql");
return new PostgresSQLConnector(connectionConfig);
case "sql-server":
const { MSSQLConnector } = require("./MSSQL");
return new MSSQLConnector(connectionConfig);
default:
throw new Error(
`There is no supported database connector for ${identifier}`
);
}
}
/**
 * Lists all of the known database connections that can be used by the agent.
 * @returns {Promise<SQLConnection[]>}
*/
async function listSQLConnections() {
return safeJsonParse(
(await SystemSettings.get({ label: "agent_sql_connections" }))?.value,
[]
);
}
module.exports = {
getDBClient,
listSQLConnections,
};
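Putting the two helpers together, the look-up-then-query flow that every tool below repeats can be sketched as (the helper name is illustrative):

const { getDBClient, listSQLConnections } = require("./SQLConnectors");

// Illustrative helper mirroring the handler logic in the sql-agent tools.
async function queryConnection(database_id, sql) {
  const config = (await listSQLConnections()).find(
    (db) => db.database_id === database_id
  );
  if (!config)
    throw new Error(`No database connection for ${database_id} was found!`);
  return getDBClient(config.engine, config).runQuery(sql);
}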

View File

@ -0,0 +1,98 @@
module.exports.SqlAgentGetTableSchema = {
name: "sql-get-table-schema",
plugin: function () {
const {
listSQLConnections,
getDBClient,
} = require("./SQLConnectors/index.js");
return {
name: "sql-get-table-schema",
setup(aibitat) {
aibitat.function({
super: aibitat,
name: this.name,
description:
"Gets the table schema in SQL for a given `table` and `database_id`",
examples: [
{
prompt: "What does the customers table in access-logs look like?",
call: JSON.stringify({
database_id: "access-logs",
table_name: "customers",
}),
},
{
prompt:
"Get me the full name of a company in records-main, the table should be call comps",
call: JSON.stringify({
database_id: "records-main",
table_name: "comps",
}),
},
],
parameters: {
$schema: "http://json-schema.org/draft-07/schema#",
type: "object",
properties: {
database_id: {
type: "string",
description:
"The database identifier for which we will connect to to query the table schema. This is a required field.",
},
table_name: {
type: "string",
description:
"The database identifier for the table name we want the schema for. This is a required field.",
},
},
additionalProperties: false,
required: ["database_id", "table_name"],
},
handler: async function ({ database_id = "", table_name = "" }) {
this.super.handlerProps.log(`Using the sql-get-table-schema tool.`);
try {
const databaseConfig = (await listSQLConnections()).find(
(db) => db.database_id === database_id
);
if (!databaseConfig) {
this.super.handlerProps.log(
`sql-get-table-schema failed to find config!`,
database_id
);
return `No database connection for ${database_id} was found!`;
}
const db = getDBClient(databaseConfig.engine, databaseConfig);
this.super.introspect(
`${this.caller}: Querying the table schema for ${table_name} in the ${databaseConfig.database_id} database.`
);
this.super.introspect(
`Running SQL: ${db.getTableSchemaSql(table_name)}`
);
const result = await db.runQuery(
db.getTableSchemaSql(table_name)
);
if (result.error) {
this.super.handlerProps.log(
`sql-get-table-schema tool reported error`,
result.error
);
this.super.introspect(`Error: ${result.error}`);
return `There was an error running the query: ${result.error}`;
}
return JSON.stringify(result);
} catch (e) {
this.super.handlerProps.log(
`sql-get-table-schema raised an error. ${e.message}`
);
return e.message;
}
},
});
},
};
},
};
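For reference, a successful call stringifies the connector's `QueryResult`, so for the MSSQL INFORMATION_SCHEMA query above the model would receive something like (values hypothetical):

// {"rows":[{"COLUMN_NAME":"id","COLUMN_DEFAULT":null,"IS_NULLABLE":"NO","DATA_TYPE":"int"}],"count":1,"error":null}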

View File

@ -0,0 +1,21 @@
const { SqlAgentGetTableSchema } = require("./get-table-schema");
const { SqlAgentListDatabase } = require("./list-database");
const { SqlAgentListTables } = require("./list-table");
const { SqlAgentQuery } = require("./query");
const sqlAgent = {
name: "sql-agent",
startupConfig: {
params: {},
},
plugin: [
SqlAgentListDatabase,
SqlAgentListTables,
SqlAgentGetTableSchema,
SqlAgentQuery,
],
};
module.exports = {
sqlAgent,
};
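Unlike the single-function plugins in the aggregator diff near the top of this section, `sql-agent` bundles an array of sub-plugins under one name. The connection records those tools look up live in the `agent_sql_connections` system setting as an array of `SQLConnection` objects, e.g. (entries hypothetical):

// [
//   { "database_id": "sales-db", "engine": "postgresql",
//     "connectionString": "postgresql://user:pass@host:5432/sales" },
//   { "database_id": "access-logs", "engine": "sql-server",
//     "connectionString": "mssql://sa:pass@host:1433/access-logs" }
// ]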

View File

@ -0,0 +1,49 @@
module.exports.SqlAgentListDatabase = {
name: "sql-list-databases",
plugin: function () {
const { listSQLConnections } = require("./SQLConnectors");
return {
name: "sql-list-databases",
setup(aibitat) {
aibitat.function({
super: aibitat,
name: this.name,
description:
"List all available databases via `list_databases` you currently have access to. Returns a unique string identifier `database_id` that can be used for future calls.",
examples: [
{
prompt: "What databases can you access?",
call: JSON.stringify({}),
},
{
prompt: "What databases can you tell me about?",
call: JSON.stringify({}),
},
{
prompt: "Is there a database named erp-logs you can access?",
call: JSON.stringify({}),
},
],
parameters: {
$schema: "http://json-schema.org/draft-07/schema#",
type: "object",
properties: {},
additionalProperties: false,
},
handler: async function () {
this.super.handlerProps.log(`Using the sql-list-databases tool.`);
this.super.introspect(
`${this.caller}: Checking which databases are available.`
);
const connections = (await listSQLConnections()).map((conn) => {
const { connectionString, ...rest } = conn;
return rest;
});
return JSON.stringify(connections);
},
});
},
};
},
};
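Because the handler strips `connectionString` before returning, the model only ever sees the safe fields; with the hypothetical connections above, the stringified return would be:

// [{"database_id":"sales-db","engine":"postgresql"},
//  {"database_id":"access-logs","engine":"sql-server"}]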

View File

@ -0,0 +1,85 @@
module.exports.SqlAgentListTables = {
name: "sql-list-tables",
plugin: function () {
const {
listSQLConnections,
getDBClient,
} = require("./SQLConnectors/index.js");
return {
name: "sql-list-tables",
setup(aibitat) {
aibitat.function({
super: aibitat,
name: this.name,
description:
"List all available tables in a database via its `database_id`.",
examples: [
{
prompt: "What tables are there in the `access-logs` database?",
call: JSON.stringify({ database_id: "access-logs" }),
},
{
prompt:
"What information can you access in the customer_accts postgres db?",
call: JSON.stringify({ database_id: "customer_accts" }),
},
{
prompt: "Can you tell me what is in the primary-logs db?",
call: JSON.stringify({ database_id: "primary-logs" }),
},
],
parameters: {
$schema: "http://json-schema.org/draft-07/schema#",
type: "object",
properties: {
database_id: {
type: "string",
description:
"The database identifier for which we will list all tables for. This is a required parameter",
},
},
additionalProperties: false,
required: ["database_id"],
},
handler: async function ({ database_id = "" }) {
try {
this.super.handlerProps.log(`Using the sql-list-tables tool.`);
const databaseConfig = (await listSQLConnections()).find(
(db) => db.database_id === database_id
);
if (!databaseConfig) {
this.super.handlerProps.log(
`sql-list-tables failed to find config!`,
database_id
);
return `No database connection for ${database_id} was found!`;
}
const db = getDBClient(databaseConfig.engine, databaseConfig);
this.super.introspect(
`${this.caller}: Checking which tables are available in the ${databaseConfig.database_id} database.`
);
this.super.introspect(`Running SQL: ${db.getTablesSql()}`);
const result = await db.runQuery(db.getTablesSql());
if (result.error) {
this.super.handlerProps.log(
`sql-list-tables tool reported error`,
result.error
);
this.super.introspect(`Error: ${result.error}`);
return `There was an error running the query: ${result.error}`;
}
return JSON.stringify(result);
} catch (e) {
console.error(e);
return e.message;
}
},
});
},
};
},
};

View File

@ -0,0 +1,101 @@
module.exports.SqlAgentQuery = {
name: "sql-query",
plugin: function () {
const {
getDBClient,
listSQLConnections,
} = require("./SQLConnectors/index.js");
return {
name: "sql-query",
setup(aibitat) {
aibitat.function({
super: aibitat,
name: this.name,
description:
"Run a read-only SQL query on a `database_id` which will return up rows of data related to the query. The query must only be SELECT statements which do not modify the table data. There should be a reasonable LIMIT on the return quantity to prevent long-running or queries which crash the db.",
examples: [
{
prompt: "How many customers are in dvd-rentals?",
call: JSON.stringify({
database_id: "dvd-rentals",
sql_query: "SELECT * FROM customers",
}),
},
{
prompt: "Can you tell me the total volume of sales last month?",
call: JSON.stringify({
database_id: "sales-db",
sql_query:
"SELECT SUM(sale_amount) AS total_sales FROM sales WHERE sale_date >= DATEADD(month, -1, DATEFROMPARTS(YEAR(GETDATE()), MONTH(GETDATE()), 1)) AND sale_date < DATEFROMPARTS(YEAR(GETDATE()), MONTH(GETDATE()), 1)",
}),
},
{
prompt:
"Do we have anyone in the staff table for our production db named 'sam'? ",
call: JSON.stringify({
database_id: "production",
sql_query:
"SElECT * FROM staff WHERE first_name='sam%' OR last_name='sam%'",
}),
},
],
parameters: {
$schema: "http://json-schema.org/draft-07/schema#",
type: "object",
properties: {
database_id: {
type: "string",
description:
"The database identifier for which we will connect to to query the table schema. This is required to run the SQL query.",
},
sql_query: {
type: "string",
description:
"The raw SQL query to run. Should be a query which does not modify the table and will return results.",
},
},
additionalProperties: false,
required: ["database_id", "sql_query"],
},
handler: async function ({ database_id = "", sql_query = "" }) {
this.super.handlerProps.log(`Using the sql-query tool.`);
try {
const databaseConfig = (await listSQLConnections()).find(
(db) => db.database_id === database_id
);
if (!databaseConfig) {
this.super.handlerProps.log(
`sql-query failed to find config!`,
database_id
);
return `No database connection for ${database_id} was found!`;
}
this.super.introspect(
`${this.caller}: I'm going to run a query on the ${database_id} database to get an answer.`
);
const db = getDBClient(databaseConfig.engine, databaseConfig);
this.super.introspect(`Running SQL: ${sql_query}`);
const result = await db.runQuery(sql_query);
if (result.error) {
this.super.handlerProps.log(
`sql-query tool reported error`,
result.error
);
this.super.introspect(`Error: ${result.error}`);
return `There was an error running the query: ${result.error}`;
}
return JSON.stringify(result);
} catch (e) {
console.error(e);
return e.message;
}
},
});
},
};
},
};
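On success the handler returns the connector's stringified `QueryResult`, so for the monthly-sales example above the model would receive something like (value hypothetical):

// {"rows":[{"total_sales":48210.55}],"count":1,"error":null}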

View File

@ -1,6 +1,5 @@
const { Document } = require("../../../../models/documents"); const { Document } = require("../../../../models/documents");
const { safeJsonParse } = require("../../../http"); const { safeJsonParse } = require("../../../http");
const { validate } = require("uuid");
const { summarizeContent } = require("../utils/summarize"); const { summarizeContent } = require("../utils/summarize");
const Provider = require("../providers/ai-provider"); const Provider = require("../providers/ai-provider");

View File

@ -49,7 +49,7 @@ const websocket = {
setup(aibitat) { setup(aibitat) {
aibitat.onError(async (error) => { aibitat.onError(async (error) => {
if (!!error?.message) { if (!!error?.message) {
console.error(chalk.red(` error: ${error.message}`)); console.error(chalk.red(` error: ${error.message}`), error);
aibitat.introspect( aibitat.introspect(
`Error encountered while running: ${error.message}` `Error encountered while running: ${error.message}`
); );

View File

@ -3,7 +3,7 @@ const { RetryError } = require("../error.js");
const Provider = require("./ai-provider.js"); const Provider = require("./ai-provider.js");
/** /**
* The provider for the Anthropic API. * The agent provider for the Anthropic API.
* By default, the model is set to 'claude-2'. * By default, the model is set to 'claude-2'.
*/ */
class AnthropicProvider extends Provider { class AnthropicProvider extends Provider {

View File

@ -4,7 +4,7 @@ const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js"); const UnTooled = require("./helpers/untooled.js");
/** /**
* The provider for the Azure OpenAI API. * The agent provider for the Azure OpenAI API.
*/ */
class AzureOpenAiProvider extends InheritMultiple([Provider, UnTooled]) { class AzureOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
model; model;
@ -84,6 +84,11 @@ class AzureOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
); );
completion = response.choices[0].message; completion = response.choices[0].message;
} }
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return { result: completion.content, cost: 0 }; return { result: completion.content, cost: 0 };
} catch (error) { } catch (error) {
throw error; throw error;

View File

@ -2,9 +2,10 @@ const OpenAI = require("openai");
const Provider = require("./ai-provider.js"); const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js"); const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js"); const UnTooled = require("./helpers/untooled.js");
const { toValidNumber } = require("../../../http/index.js");
/** /**
* The provider for the Generic OpenAI provider. * The agent provider for the Generic OpenAI provider.
* Since we cannot promise the generic provider even supports tool calling * Since we cannot promise the generic provider even supports tool calling
* which is nearly 100% likely it does not, we can just wrap it in untooled * which is nearly 100% likely it does not, we can just wrap it in untooled
* which often is far better anyway. * which often is far better anyway.
@ -24,7 +25,9 @@ class GenericOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
this._client = client; this._client = client;
this.model = model; this.model = model;
this.verbose = true; this.verbose = true;
this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024; this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS
? toValidNumber(process.env.GENERIC_OPEN_AI_MAX_TOKENS, 1024)
: 1024;
} }
get client() { get client() {
@ -94,6 +97,10 @@ class GenericOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
completion = response.choices[0].message; completion = response.choices[0].message;
} }
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return { return {
result: completion.content, result: completion.content,
cost: 0, cost: 0,
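One aside on the `maxTokens` change above: assuming `toValidNumber` coerces its input to a number and falls back to the provided default when coercion fails (behavior inferred from the name; the helper body is not shown in this diff), the env handling would behave like:

// toValidNumber("4096", 1024)          -> 4096
// toValidNumber("not-a-number", 1024)  -> 1024  (inferred fallback)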

View File

@ -3,7 +3,7 @@ const Provider = require("./ai-provider.js");
const { RetryError } = require("../error.js"); const { RetryError } = require("../error.js");
/** /**
* The provider for the Groq provider. * The agent provider for the Groq provider.
* Using OpenAI tool calling with groq really sucks right now * Using OpenAI tool calling with groq really sucks right now
* its just fast and bad. We should probably migrate this to Untooled to improve * its just fast and bad. We should probably migrate this to Untooled to improve
* coherence. * coherence.

View File

@ -4,7 +4,7 @@ const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js"); const UnTooled = require("./helpers/untooled.js");
/** /**
* The provider for the KoboldCPP provider. * The agent provider for the KoboldCPP provider.
*/ */
class KoboldCPPProvider extends InheritMultiple([Provider, UnTooled]) { class KoboldCPPProvider extends InheritMultiple([Provider, UnTooled]) {
model; model;
@ -89,6 +89,10 @@ class KoboldCPPProvider extends InheritMultiple([Provider, UnTooled]) {
completion = response.choices[0].message; completion = response.choices[0].message;
} }
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return { return {
result: completion.content, result: completion.content,
cost: 0, cost: 0,

View File

@ -4,7 +4,7 @@ const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js"); const UnTooled = require("./helpers/untooled.js");
/** /**
 * The provider for the LMStudio provider. * The agent provider for the LMStudio provider.
*/ */
class LMStudioProvider extends InheritMultiple([Provider, UnTooled]) { class LMStudioProvider extends InheritMultiple([Provider, UnTooled]) {
model; model;
@ -89,6 +89,10 @@ class LMStudioProvider extends InheritMultiple([Provider, UnTooled]) {
completion = response.choices[0].message; completion = response.choices[0].message;
} }
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return { return {
result: completion.content, result: completion.content,
cost: 0, cost: 0,

View File

@ -4,7 +4,7 @@ const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js"); const UnTooled = require("./helpers/untooled.js");
/** /**
* The provider for the LocalAI provider. * The agent provider for the LocalAI provider.
*/ */
class LocalAiProvider extends InheritMultiple([Provider, UnTooled]) { class LocalAiProvider extends InheritMultiple([Provider, UnTooled]) {
model; model;
@ -93,6 +93,10 @@ class LocalAiProvider extends InheritMultiple([Provider, UnTooled]) {
completion = response.choices[0].message; completion = response.choices[0].message;
} }
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return { result: completion.content, cost: 0 }; return { result: completion.content, cost: 0 };
} catch (error) { } catch (error) {
throw error; throw error;

View File

@ -4,7 +4,7 @@ const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js"); const UnTooled = require("./helpers/untooled.js");
/** /**
* The provider for the Mistral provider. * The agent provider for the Mistral provider.
* Mistral limits what models can call tools and even when using those * Mistral limits what models can call tools and even when using those
* the model names change and dont match docs. When you do have the right model * the model names change and dont match docs. When you do have the right model
* it still fails and is not truly OpenAI compatible so its easier to just wrap * it still fails and is not truly OpenAI compatible so its easier to just wrap
@ -93,6 +93,10 @@ class MistralProvider extends InheritMultiple([Provider, UnTooled]) {
completion = response.choices[0].message; completion = response.choices[0].message;
} }
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return { return {
result: completion.content, result: completion.content,
cost: 0, cost: 0,

View File

@ -4,7 +4,7 @@ const UnTooled = require("./helpers/untooled.js");
const { Ollama } = require("ollama"); const { Ollama } = require("ollama");
/** /**
* The provider for the Ollama provider. * The agent provider for the Ollama provider.
*/ */
class OllamaProvider extends InheritMultiple([Provider, UnTooled]) { class OllamaProvider extends InheritMultiple([Provider, UnTooled]) {
model; model;
@ -83,6 +83,10 @@ class OllamaProvider extends InheritMultiple([Provider, UnTooled]) {
completion = response.message; completion = response.message;
} }
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return { return {
result: completion.content, result: completion.content,
cost: 0, cost: 0,

View File

@ -3,7 +3,7 @@ const Provider = require("./ai-provider.js");
const { RetryError } = require("../error.js"); const { RetryError } = require("../error.js");
/** /**
* The provider for the OpenAI API. * The agent provider for the OpenAI API.
* By default, the model is set to 'gpt-3.5-turbo'. * By default, the model is set to 'gpt-3.5-turbo'.
*/ */
class OpenAIProvider extends Provider { class OpenAIProvider extends Provider {

View File

@ -4,7 +4,7 @@ const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js"); const UnTooled = require("./helpers/untooled.js");
/** /**
* The provider for the OpenRouter provider. * The agent provider for the OpenRouter provider.
*/ */
class OpenRouterProvider extends InheritMultiple([Provider, UnTooled]) { class OpenRouterProvider extends InheritMultiple([Provider, UnTooled]) {
model; model;
@ -93,6 +93,10 @@ class OpenRouterProvider extends InheritMultiple([Provider, UnTooled]) {
completion = response.choices[0].message; completion = response.choices[0].message;
} }
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return { return {
result: completion.content, result: completion.content,
cost: 0, cost: 0,

View File

@ -4,7 +4,7 @@ const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js"); const UnTooled = require("./helpers/untooled.js");
/** /**
* The provider for the Perplexity provider. * The agent provider for the Perplexity provider.
*/ */
class PerplexityProvider extends InheritMultiple([Provider, UnTooled]) { class PerplexityProvider extends InheritMultiple([Provider, UnTooled]) {
model; model;
@ -89,6 +89,10 @@ class PerplexityProvider extends InheritMultiple([Provider, UnTooled]) {
completion = response.choices[0].message; completion = response.choices[0].message;
} }
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return { return {
result: completion.content, result: completion.content,
cost: 0, cost: 0,

View File

@ -4,7 +4,7 @@ const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js"); const UnTooled = require("./helpers/untooled.js");
/** /**
* The provider for the Oobabooga provider. * The agent provider for the Oobabooga provider.
*/ */
class TextWebGenUiProvider extends InheritMultiple([Provider, UnTooled]) { class TextWebGenUiProvider extends InheritMultiple([Provider, UnTooled]) {
model; model;
@ -88,6 +88,10 @@ class TextWebGenUiProvider extends InheritMultiple([Provider, UnTooled]) {
completion = response.choices[0].message; completion = response.choices[0].message;
} }
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return { return {
result: completion.content, result: completion.content,
cost: 0, cost: 0,

View File

@ -4,7 +4,7 @@ const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js"); const UnTooled = require("./helpers/untooled.js");
/** /**
* The provider for the TogetherAI provider. * The agent provider for the TogetherAI provider.
*/ */
class TogetherAIProvider extends InheritMultiple([Provider, UnTooled]) { class TogetherAIProvider extends InheritMultiple([Provider, UnTooled]) {
model; model;
@ -89,6 +89,10 @@ class TogetherAIProvider extends InheritMultiple([Provider, UnTooled]) {
completion = response.choices[0].message; completion = response.choices[0].message;
} }
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return { return {
result: completion.content, result: completion.content,
cost: 0, cost: 0,

View File

@ -38,6 +38,25 @@ class Deduplicator {
return this.#hashes.hasOwnProperty(newSig); return this.#hashes.hasOwnProperty(newSig);
} }
/**
* Resets the object property for this instance of the Deduplicator class
* @param {('runs'|'cooldowns'|'uniques')} type - The type of prop to reset
*/
reset(type = "runs") {
switch (type) {
case "runs":
this.#hashes = {};
break;
case "cooldowns":
this.#cooldowns = {};
break;
case "uniques":
this.#uniques = {};
break;
}
return;
}
startCooldown( startCooldown(
key, key,
parameters = { parameters = {
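Tying this back to the provider changes above, the reset pattern those comments describe looks like this in use (a sketch; only `reset` is taken from this diff):

const dedupe = new Deduplicator();
// ...during a single chat exchange, identical tool-call signatures are
// tracked in the "runs" hashes shown above and short-circuited...
dedupe.reset("runs"); // clear at the end of a completion so the next
                      // chat turn may reuse tools it already called once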

Some files were not shown because too many files have changed in this diff.