Merge branch 'master' into 1347-human-readable-confluence-url

This commit is contained in:
Timothy Carambat 2024-05-19 11:52:22 -07:00 committed by GitHub
commit cf3bfc5293
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
35 changed files with 696 additions and 241 deletions

View File

@ -1,19 +1,23 @@
const fs = require("fs"); const fs = require("fs");
const path = require("path"); const path = require("path");
const { v4 } = require("uuid"); const { v4 } = require("uuid");
const defaultWhisper = "Xenova/whisper-small"; // Model Card: https://huggingface.co/Xenova/whisper-small
const fileSize = {
"Xenova/whisper-small": "250mb",
"Xenova/whisper-large": "1.56GB",
};
class LocalWhisper { class LocalWhisper {
constructor() { constructor({ options }) {
// Model Card: https://huggingface.co/Xenova/whisper-small this.model = options?.WhisperModelPref ?? defaultWhisper;
this.model = "Xenova/whisper-small"; this.fileSize = fileSize[this.model];
this.cacheDir = path.resolve( this.cacheDir = path.resolve(
process.env.STORAGE_DIR process.env.STORAGE_DIR
? path.resolve(process.env.STORAGE_DIR, `models`) ? path.resolve(process.env.STORAGE_DIR, `models`)
: path.resolve(__dirname, `../../../server/storage/models`) : path.resolve(__dirname, `../../../server/storage/models`)
); );
this.modelPath = path.resolve(this.cacheDir, "Xenova", "whisper-small"); this.modelPath = path.resolve(this.cacheDir, ...this.model.split("/"));
// Make directory when it does not exist in existing installations // Make directory when it does not exist in existing installations
if (!fs.existsSync(this.cacheDir)) if (!fs.existsSync(this.cacheDir))
fs.mkdirSync(this.cacheDir, { recursive: true }); fs.mkdirSync(this.cacheDir, { recursive: true });
@ -104,7 +108,7 @@ class LocalWhisper {
async client() { async client() {
if (!fs.existsSync(this.modelPath)) { if (!fs.existsSync(this.modelPath)) {
this.#log( this.#log(
`The native whisper model has never been run and will be downloaded right now. Subsequent runs will be faster. (~250MB)` `The native whisper model has never been run and will be downloaded right now. Subsequent runs will be faster. (~${this.fileSize})`
); );
} }

View File

@ -124,6 +124,10 @@ GID='1000'
# COHERE_API_KEY= # COHERE_API_KEY=
# EMBEDDING_MODEL_PREF='embed-english-v3.0' # EMBEDDING_MODEL_PREF='embed-english-v3.0'
# EMBEDDING_ENGINE='voyageai'
# VOYAGEAI_API_KEY=
# EMBEDDING_MODEL_PREF='voyage-large-2-instruct'
########################################### ###########################################
######## Vector Database Selection ######## ######## Vector Database Selection ########
########################################### ###########################################

2
frontend/.gitignore vendored
View File

@ -9,10 +9,8 @@ lerna-debug.log*
node_modules node_modules
dist dist
lib
dist-ssr dist-ssr
*.local *.local
!frontend/components/lib
# Editor directories and files # Editor directories and files
.vscode/* .vscode/*

View File

@ -0,0 +1,50 @@
export default function VoyageAiOptions({ settings }) {
return (
<div className="w-full flex flex-col gap-y-4">
<div className="w-full flex items-center gap-4">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
API Key
</label>
<input
type="password"
name="VoyageAiApiKey"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Voyage AI API Key"
defaultValue={settings?.VoyageAiApiKey ? "*".repeat(20) : ""}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Model Preference
</label>
<select
name="EmbeddingModelPref"
required={true}
defaultValue={settings?.EmbeddingModelPref}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
<optgroup label="Available embedding models">
{[
"voyage-large-2-instruct",
"voyage-law-2",
"voyage-code-2",
"voyage-large-2",
"voyage-2",
].map((model) => {
return (
<option key={model} value={model}>
{model}
</option>
);
})}
</optgroup>
</select>
</div>
</div>
</div>
);
}

View File

@ -14,6 +14,8 @@ import {
import React, { useEffect, useState } from "react"; import React, { useEffect, useState } from "react";
import SettingsButton from "../SettingsButton"; import SettingsButton from "../SettingsButton";
import { isMobile } from "react-device-detect"; import { isMobile } from "react-device-detect";
import { Tooltip } from "react-tooltip";
import { v4 } from "uuid";
export const MAX_ICONS = 3; export const MAX_ICONS = 3;
export const ICON_COMPONENTS = { export const ICON_COMPONENTS = {
@ -47,36 +49,48 @@ export default function Footer() {
return ( return (
<div className="flex justify-center mb-2"> <div className="flex justify-center mb-2">
<div className="flex space-x-4"> <div className="flex space-x-4">
<a <ToolTipWrapper id="open-github">
href={paths.github()} <a
target="_blank" href={paths.github()}
rel="noreferrer" target="_blank"
className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border" rel="noreferrer"
aria-label="Find us on Github" className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
> aria-label="Find us on Github"
<GithubLogo weight="fill" className="h-5 w-5 " /> data-tooltip-id="open-github"
</a> data-tooltip-content="View source code on Github"
<a >
href={paths.docs()} <GithubLogo weight="fill" className="h-5 w-5 " />
target="_blank" </a>
rel="noreferrer" </ToolTipWrapper>
className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border" <ToolTipWrapper id="open-documentation">
aria-label="Docs" <a
> href={paths.docs()}
<BookOpen weight="fill" className="h-5 w-5 " /> target="_blank"
</a> rel="noreferrer"
<a className="w-fit transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
href={paths.discord()} aria-label="Docs"
target="_blank" data-tooltip-id="open-documentation"
rel="noreferrer" data-tooltip-content="Open AnythingLLM help docs"
className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border" >
aria-label="Join our Discord server" <BookOpen weight="fill" className="h-5 w-5 " />
> </a>
<DiscordLogo </ToolTipWrapper>
weight="fill" <ToolTipWrapper id="open-discord">
className="h-5 w-5 stroke-slate-200 group-hover:stroke-slate-200" <a
/> href={paths.discord()}
</a> target="_blank"
rel="noreferrer"
className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
aria-label="Join our Discord server"
data-tooltip-id="open-discord"
data-tooltip-content="Join the AnythingLLM Discord"
>
<DiscordLogo
weight="fill"
className="h-5 w-5 stroke-slate-200 group-hover:stroke-slate-200"
/>
</a>
</ToolTipWrapper>
{!isMobile && <SettingsButton />} {!isMobile && <SettingsButton />}
</div> </div>
</div> </div>
@ -105,3 +119,17 @@ export default function Footer() {
</div> </div>
); );
} }
export function ToolTipWrapper({ id = v4(), children }) {
return (
<div className="flex w-fit">
{children}
<Tooltip
id={id}
place="top"
delayShow={300}
className="tooltip !text-xs z-99"
/>
</div>
);
}

View File

@ -1,80 +1,84 @@
export default function GenericOpenAiOptions({ settings }) { export default function GenericOpenAiOptions({ settings }) {
return ( return (
<div className="flex gap-4 flex-wrap"> <div className="flex flex-col gap-y-4">
<div className="flex flex-col w-60"> <div className="flex gap-4 flex-wrap">
<label className="text-white text-sm font-semibold block mb-4"> <div className="flex flex-col w-60">
Base URL <label className="text-white text-sm font-semibold block mb-4">
</label> Base URL
<input </label>
type="url" <input
name="GenericOpenAiBasePath" type="url"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5" name="GenericOpenAiBasePath"
placeholder="eg: https://proxy.openai.com" className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
defaultValue={settings?.GenericOpenAiBasePath} placeholder="eg: https://proxy.openai.com"
required={true} defaultValue={settings?.GenericOpenAiBasePath}
autoComplete="off" required={true}
spellCheck={false} autoComplete="off"
/> spellCheck={false}
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
API Key
</label>
<input
type="password"
name="GenericOpenAiKey"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Generic service API Key"
defaultValue={settings?.GenericOpenAiKey ? "*".repeat(20) : ""}
required={false}
autoComplete="off"
spellCheck={false}
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Name
</label>
<input
type="text"
name="GenericOpenAiModelPref"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Model id used for chat requests"
defaultValue={settings?.GenericOpenAiModelPref}
required={true}
autoComplete="off"
/>
</div>
</div> </div>
<div className="flex flex-col w-60"> <div className="flex gap-x-4 flex-wrap">
<label className="text-white text-sm font-semibold block mb-4"> <div className="flex flex-col w-60">
API Key <label className="text-white text-sm font-semibold block mb-4">
</label> Token context window
<input </label>
type="password" <input
name="GenericOpenAiKey" type="number"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5" name="GenericOpenAiTokenLimit"
placeholder="Generic service API Key" className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
defaultValue={settings?.GenericOpenAiKey ? "*".repeat(20) : ""} placeholder="Content window limit (eg: 4096)"
required={false} min={1}
autoComplete="off" onScroll={(e) => e.target.blur()}
spellCheck={false} defaultValue={settings?.GenericOpenAiTokenLimit}
/> required={true}
</div> autoComplete="off"
<div className="flex flex-col w-60"> />
<label className="text-white text-sm font-semibold block mb-4"> </div>
Chat Model Name <div className="flex flex-col w-60">
</label> <label className="text-white text-sm font-semibold block mb-4">
<input Max Tokens
type="text" </label>
name="GenericOpenAiModelPref" <input
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5" type="number"
placeholder="Model id used for chat requests" name="GenericOpenAiMaxTokens"
defaultValue={settings?.GenericOpenAiModelPref} className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
required={true} placeholder="Max tokens per request (eg: 1024)"
autoComplete="off" min={1}
/> defaultValue={settings?.GenericOpenAiMaxTokens || 1024}
</div> required={true}
<div className="flex flex-col w-60"> autoComplete="off"
<label className="text-white text-sm font-semibold block mb-4"> />
Token context window </div>
</label>
<input
type="number"
name="GenericOpenAiTokenLimit"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Content window limit (eg: 4096)"
min={1}
onScroll={(e) => e.target.blur()}
defaultValue={settings?.GenericOpenAiTokenLimit}
required={true}
autoComplete="off"
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Max Tokens
</label>
<input
type="number"
name="GenericOpenAiMaxTokens"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Max tokens per request (eg: 1024)"
min={1}
defaultValue={settings?.GenericOpenAiMaxTokens || 1024}
required={true}
autoComplete="off"
/>
</div> </div>
</div> </div>
); );

View File

@ -3,7 +3,7 @@ import System from "@/models/system";
import showToast from "@/utils/toast"; import showToast from "@/utils/toast";
import pluralize from "pluralize"; import pluralize from "pluralize";
import { TagsInput } from "react-tag-input-component"; import { TagsInput } from "react-tag-input-component";
import { Warning } from "@phosphor-icons/react"; import { Info, Warning } from "@phosphor-icons/react";
import { Tooltip } from "react-tooltip"; import { Tooltip } from "react-tooltip";
const DEFAULT_BRANCHES = ["main", "master"]; const DEFAULT_BRANCHES = ["main", "master"];
@ -92,45 +92,7 @@ export default function GithubOptions() {
<p className="font-bold text-white">Github Access Token</p>{" "} <p className="font-bold text-white">Github Access Token</p>{" "}
<p className="text-xs text-white/50 font-light flex items-center"> <p className="text-xs text-white/50 font-light flex items-center">
optional optional
{!accessToken && ( <PATTooltip accessToken={accessToken} />
<Warning
size={14}
className="ml-1 text-orange-500 cursor-pointer"
data-tooltip-id="access-token-tooltip"
data-tooltip-place="right"
/>
)}
<Tooltip
delayHide={300}
id="access-token-tooltip"
className="max-w-xs"
clickable={true}
>
<p className="text-sm">
Without a{" "}
<a
href="https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens"
rel="noreferrer"
target="_blank"
className="underline"
onClick={(e) => e.stopPropagation()}
>
Personal Access Token
</a>
, the GitHub API may limit the number of files that
can be collected due to rate limits. You can{" "}
<a
href="https://github.com/settings/personal-access-tokens/new"
rel="noreferrer"
target="_blank"
className="underline"
onClick={(e) => e.stopPropagation()}
>
create a temporary Access Token
</a>{" "}
to avoid this issue.
</p>
</Tooltip>
</p> </p>
</label> </label>
<p className="text-xs font-normal text-white/50"> <p className="text-xs font-normal text-white/50">
@ -180,6 +142,7 @@ export default function GithubOptions() {
</div> </div>
<div className="flex flex-col gap-y-2 w-full pr-10"> <div className="flex flex-col gap-y-2 w-full pr-10">
<PATAlert accessToken={accessToken} />
<button <button
type="submit" type="submit"
disabled={loading} disabled={loading}
@ -269,3 +232,78 @@ function GitHubBranchSelection({ repo, accessToken }) {
</div> </div>
); );
} }
function PATAlert({ accessToken }) {
if (!!accessToken) return null;
return (
<div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-4 bg-blue-800/30 w-fit rounded-lg px-4 py-2">
<div className="gap-x-2 flex items-center">
<Info className="shrink-0" size={25} />
<p className="text-sm">
Without filling out the <b>Github Access Token</b> this data connector
will only be able to collect the <b>top-level</b> files of the repo
due to GitHub's public API rate-limits.
<br />
<br />
<a
href="https://github.com/settings/personal-access-tokens/new"
rel="noreferrer"
target="_blank"
className="underline"
onClick={(e) => e.stopPropagation()}
>
{" "}
Get a free Personal Access Token with a GitHub account here.
</a>
</p>
</div>
</div>
);
}
function PATTooltip({ accessToken }) {
if (!!accessToken) return null;
return (
<>
{!accessToken && (
<Warning
size={14}
className="ml-1 text-orange-500 cursor-pointer"
data-tooltip-id="access-token-tooltip"
data-tooltip-place="right"
/>
)}
<Tooltip
delayHide={300}
id="access-token-tooltip"
className="max-w-xs"
clickable={true}
>
<p className="text-sm">
Without a{" "}
<a
href="https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens"
rel="noreferrer"
target="_blank"
className="underline"
onClick={(e) => e.stopPropagation()}
>
Personal Access Token
</a>
, the GitHub API may limit the number of files that can be collected
due to rate limits. You can{" "}
<a
href="https://github.com/settings/personal-access-tokens/new"
rel="noreferrer"
target="_blank"
className="underline"
onClick={(e) => e.stopPropagation()}
>
create a temporary Access Token
</a>{" "}
to avoid this issue.
</p>
</Tooltip>
</>
);
}

View File

@ -3,6 +3,7 @@ import paths from "@/utils/paths";
import { ArrowUUpLeft, Wrench } from "@phosphor-icons/react"; import { ArrowUUpLeft, Wrench } from "@phosphor-icons/react";
import { Link } from "react-router-dom"; import { Link } from "react-router-dom";
import { useMatch } from "react-router-dom"; import { useMatch } from "react-router-dom";
import { ToolTipWrapper } from "../Footer";
export default function SettingsButton() { export default function SettingsButton() {
const isInSettings = !!useMatch("/settings/*"); const isInSettings = !!useMatch("/settings/*");
@ -12,22 +13,32 @@ export default function SettingsButton() {
if (isInSettings) if (isInSettings)
return ( return (
<Link <ToolTipWrapper id="go-home">
to={paths.home()} <Link
className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border" to={paths.home()}
aria-label="Home" className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
> aria-label="Home"
<ArrowUUpLeft className="h-5 w-5" weight="fill" /> data-tooltip-id="go-home"
</Link> data-tooltip-content="Back to workspaces"
>
<ArrowUUpLeft className="h-5 w-5" weight="fill" />
</Link>
</ToolTipWrapper>
); );
return ( return (
<Link <ToolTipWrapper id="open-settings">
to={!!user?.role ? paths.settings.system() : paths.settings.appearance()} <Link
className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border" to={
aria-label="Settings" !!user?.role ? paths.settings.system() : paths.settings.appearance()
> }
<Wrench className="h-5 w-5" weight="fill" /> className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
</Link> aria-label="Settings"
data-tooltip-id="open-settings"
data-tooltip-content="Open settings"
>
<Wrench className="h-5 w-5" weight="fill" />
</Link>
</ToolTipWrapper>
); );
} }

View File

@ -329,7 +329,7 @@ const SidebarOptions = ({ user = null }) => (
<Option <Option
href={paths.settings.embedSetup()} href={paths.settings.embedSetup()}
childLinks={[paths.settings.embedChats()]} childLinks={[paths.settings.embedChats()]}
btnText="Embedded Chat" btnText="Chat Embed Widgets"
icon={<CodeBlock className="h-5 w-5 flex-shrink-0" />} icon={<CodeBlock className="h-5 w-5 flex-shrink-0" />}
user={user} user={user}
flex={true} flex={true}
@ -338,7 +338,7 @@ const SidebarOptions = ({ user = null }) => (
<> <>
<Option <Option
href={paths.settings.embedChats()} href={paths.settings.embedChats()}
btnText="Embedded Chat History" btnText="Chat Embed History"
icon={<Barcode className="h-5 w-5 flex-shrink-0" />} icon={<Barcode className="h-5 w-5 flex-shrink-0" />}
user={user} user={user}
flex={true} flex={true}

View File

@ -1,38 +1,89 @@
import { Gauge } from "@phosphor-icons/react"; import { Gauge } from "@phosphor-icons/react";
export default function NativeTranscriptionOptions() { import { useState } from "react";
export default function NativeTranscriptionOptions({ settings }) {
const [model, setModel] = useState(settings?.WhisperModelPref);
return ( return (
<div className="w-full flex flex-col gap-y-4"> <div className="w-full flex flex-col gap-y-4">
<div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-4 bg-blue-800/30 w-fit rounded-lg px-4 py-2"> <LocalWarning model={model} />
<div className="gap-x-2 flex items-center">
<Gauge size={25} />
<p className="text-sm">
Using the local whisper model on machines with limited RAM or CPU
can stall AnythingLLM when processing media files.
<br />
We recommend at least 2GB of RAM and upload files &lt;10Mb.
<br />
<br />
<i>
The built-in model will automatically download on the first use.
</i>
</p>
</div>
</div>
<div className="w-full flex items-center gap-4"> <div className="w-full flex items-center gap-4">
<div className="flex flex-col w-60"> <div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4"> <label className="text-white text-sm font-semibold block mb-4">
Model Selection Model Selection
</label> </label>
<select <select
disabled={true} name="WhisperModelPref"
defaultValue={model}
onChange={(e) => setModel(e.target.value)}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5" className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
> >
<option disabled={true} selected={true}> {["Xenova/whisper-small", "Xenova/whisper-large"].map(
Xenova/whisper-small (value, i) => {
</option> return (
<option key={i} value={value}>
{value}
</option>
);
}
)}
</select> </select>
</div> </div>
</div> </div>
</div> </div>
); );
} }
function LocalWarning({ model }) {
switch (model) {
case "Xenova/whisper-small":
return <WhisperSmall />;
case "Xenova/whisper-large":
return <WhisperLarge />;
default:
return <WhisperSmall />;
}
}
function WhisperSmall() {
return (
<div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-4 bg-blue-800/30 w-fit rounded-lg px-4 py-2">
<div className="gap-x-2 flex items-center">
<Gauge size={25} />
<p className="text-sm">
Running the <b>whisper-small</b> model on a machine with limited RAM
or CPU can stall AnythingLLM when processing media files.
<br />
We recommend at least 2GB of RAM and upload files &lt;10Mb.
<br />
<br />
<i>
This model will automatically download on the first use. (250mb)
</i>
</p>
</div>
</div>
);
}
function WhisperLarge() {
return (
<div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-4 bg-blue-800/30 w-fit rounded-lg px-4 py-2">
<div className="gap-x-2 flex items-center">
<Gauge size={25} />
<p className="text-sm">
Using the <b>whisper-large</b> model on machines with limited RAM or
CPU can stall AnythingLLM when processing media files. This model is
substantially larger than the whisper-small.
<br />
We recommend at least 8GB of RAM and upload files &lt;10Mb.
<br />
<br />
<i>
This model will automatically download on the first use. (1.56GB)
</i>
</p>
</div>
</div>
);
}

View File

@ -12,6 +12,7 @@ import AvailableAgentsButton, {
} from "./AgentMenu"; } from "./AgentMenu";
import TextSizeButton from "./TextSizeMenu"; import TextSizeButton from "./TextSizeMenu";
import SpeechToText from "./SpeechToText"; import SpeechToText from "./SpeechToText";
import { Tooltip } from "react-tooltip";
export const PROMPT_INPUT_EVENT = "set_prompt_input"; export const PROMPT_INPUT_EVENT = "set_prompt_input";
export default function PromptInput({ export default function PromptInput({
@ -134,14 +135,25 @@ export default function PromptInput({
{buttonDisabled ? ( {buttonDisabled ? (
<StopGenerationButton /> <StopGenerationButton />
) : ( ) : (
<button <>
ref={formRef} <button
type="submit" ref={formRef}
className="inline-flex justify-center rounded-2xl cursor-pointer text-white/60 hover:text-white group ml-4" type="submit"
> className="inline-flex justify-center rounded-2xl cursor-pointer text-white/60 hover:text-white group ml-4"
<PaperPlaneRight className="w-7 h-7 my-3" weight="fill" /> data-tooltip-id="send-prompt"
<span className="sr-only">Send message</span> data-tooltip-content="Send prompt message to workspace"
</button> aria-label="Send prompt message to workspace"
>
<PaperPlaneRight className="w-7 h-7 my-3" weight="fill" />
<span className="sr-only">Send message</span>
</button>
<Tooltip
id="send-prompt"
place="bottom"
delayShow={300}
className="tooltip !text-xs z-99"
/>
</>
)} )}
</div> </div>
<div className="flex justify-between py-3.5"> <div className="flex justify-between py-3.5">

Binary file not shown.

After

Width:  |  Height:  |  Size: 20 KiB

View File

@ -10,6 +10,8 @@ import LocalAiLogo from "@/media/llmprovider/localai.png";
import OllamaLogo from "@/media/llmprovider/ollama.png"; import OllamaLogo from "@/media/llmprovider/ollama.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png"; import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import CohereLogo from "@/media/llmprovider/cohere.png"; import CohereLogo from "@/media/llmprovider/cohere.png";
import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png";
import PreLoader from "@/components/Preloader"; import PreLoader from "@/components/Preloader";
import ChangeWarningModal from "@/components/ChangeWarning"; import ChangeWarningModal from "@/components/ChangeWarning";
import OpenAiOptions from "@/components/EmbeddingSelection/OpenAiOptions"; import OpenAiOptions from "@/components/EmbeddingSelection/OpenAiOptions";
@ -19,6 +21,7 @@ import NativeEmbeddingOptions from "@/components/EmbeddingSelection/NativeEmbedd
import OllamaEmbeddingOptions from "@/components/EmbeddingSelection/OllamaOptions"; import OllamaEmbeddingOptions from "@/components/EmbeddingSelection/OllamaOptions";
import LMStudioEmbeddingOptions from "@/components/EmbeddingSelection/LMStudioOptions"; import LMStudioEmbeddingOptions from "@/components/EmbeddingSelection/LMStudioOptions";
import CohereEmbeddingOptions from "@/components/EmbeddingSelection/CohereOptions"; import CohereEmbeddingOptions from "@/components/EmbeddingSelection/CohereOptions";
import VoyageAiOptions from "@/components/EmbeddingSelection/VoyageAiOptions";
import EmbedderItem from "@/components/EmbeddingSelection/EmbedderItem"; import EmbedderItem from "@/components/EmbeddingSelection/EmbedderItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react"; import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@ -78,6 +81,13 @@ const EMBEDDERS = [
options: (settings) => <CohereEmbeddingOptions settings={settings} />, options: (settings) => <CohereEmbeddingOptions settings={settings} />,
description: "Run powerful embedding models from Cohere.", description: "Run powerful embedding models from Cohere.",
}, },
{
name: "Voyage AI",
value: "voyageai",
logo: VoyageAiLogo,
options: (settings) => <VoyageAiOptions settings={settings} />,
description: "Run powerful embedding models from Voyage AI.",
},
]; ];
export default function GeneralEmbeddingPreference() { export default function GeneralEmbeddingPreference() {

View File

@ -12,6 +12,23 @@ import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react"; import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
import CTAButton from "@/components/lib/CTAButton"; import CTAButton from "@/components/lib/CTAButton";
const PROVIDERS = [
{
name: "OpenAI",
value: "openai",
logo: OpenAiLogo,
options: (settings) => <OpenAiWhisperOptions settings={settings} />,
description: "Leverage the OpenAI Whisper-large model using your API key.",
},
{
name: "AnythingLLM Built-In",
value: "local",
logo: AnythingLLMIcon,
options: (settings) => <NativeTranscriptionOptions settings={settings} />,
description: "Run a built-in whisper model on this instance privately.",
},
];
export default function TranscriptionModelPreference() { export default function TranscriptionModelPreference() {
const [saving, setSaving] = useState(false); const [saving, setSaving] = useState(false);
const [hasChanges, setHasChanges] = useState(false); const [hasChanges, setHasChanges] = useState(false);
@ -68,24 +85,6 @@ export default function TranscriptionModelPreference() {
fetchKeys(); fetchKeys();
}, []); }, []);
const PROVIDERS = [
{
name: "OpenAI",
value: "openai",
logo: OpenAiLogo,
options: <OpenAiWhisperOptions settings={settings} />,
description:
"Leverage the OpenAI Whisper-large model using your API key.",
},
{
name: "AnythingLLM Built-In",
value: "local",
logo: AnythingLLMIcon,
options: <NativeTranscriptionOptions settings={settings} />,
description: "Run a built-in whisper model on this instance privately.",
},
];
useEffect(() => { useEffect(() => {
const filtered = PROVIDERS.filter((provider) => const filtered = PROVIDERS.filter((provider) =>
provider.name.toLowerCase().includes(searchQuery.toLowerCase()) provider.name.toLowerCase().includes(searchQuery.toLowerCase())
@ -228,7 +227,7 @@ export default function TranscriptionModelPreference() {
{selectedProvider && {selectedProvider &&
PROVIDERS.find( PROVIDERS.find(
(provider) => provider.value === selectedProvider (provider) => provider.value === selectedProvider
)?.options} )?.options(settings)}
</div> </div>
</div> </div>
</form> </form>

View File

@ -28,6 +28,8 @@ import LanceDbLogo from "@/media/vectordbs/lancedb.png";
import WeaviateLogo from "@/media/vectordbs/weaviate.png"; import WeaviateLogo from "@/media/vectordbs/weaviate.png";
import QDrantLogo from "@/media/vectordbs/qdrant.png"; import QDrantLogo from "@/media/vectordbs/qdrant.png";
import MilvusLogo from "@/media/vectordbs/milvus.png"; import MilvusLogo from "@/media/vectordbs/milvus.png";
import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png";
import React, { useState, useEffect } from "react"; import React, { useState, useEffect } from "react";
import paths from "@/utils/paths"; import paths from "@/utils/paths";
import { useNavigate } from "react-router-dom"; import { useNavigate } from "react-router-dom";
@ -292,6 +294,13 @@ export const EMBEDDING_ENGINE_PRIVACY = {
], ],
logo: CohereLogo, logo: CohereLogo,
}, },
voyageai: {
name: "Voyage AI",
description: [
"Data sent to Voyage AI's servers is shared according to the terms of service of voyageai.com.",
],
logo: VoyageAiLogo,
},
}; };
export default function DataHandling({ setHeader, setForwardBtn, setBackBtn }) { export default function DataHandling({ setHeader, setForwardBtn, setBackBtn }) {

View File

@ -20,19 +20,23 @@ export default function ChatTemperatureSettings({
LLM Temperature LLM Temperature
</label> </label>
<p className="text-white text-opacity-60 text-xs font-medium py-1.5"> <p className="text-white text-opacity-60 text-xs font-medium py-1.5">
This setting controls how &quot;random&quot; or dynamic your chat This setting controls how &quot;creative&quot; your LLM responses will
responses will be. be.
<br /> <br />
The higher the number (1.0 maximum) the more random and incoherent. The higher the number the more creative. For some models this can lead
to incoherent responses when set too high.
<br /> <br />
<i>Recommended: {defaults.temp}</i> <br />
<i>
Most LLMs have various acceptable ranges of valid values. Consult
your LLM provider for that information.
</i>
</p> </p>
</div> </div>
<input <input
name="openAiTemp" name="openAiTemp"
type="number" type="number"
min={0.0} min={0.0}
max={1.0}
step={0.1} step={0.1}
onWheel={(e) => e.target.blur()} onWheel={(e) => e.target.blur()}
defaultValue={workspace?.openAiTemp ?? defaults.temp} defaultValue={workspace?.openAiTemp ?? defaults.temp}

View File

@ -2,7 +2,6 @@ import Workspace from "@/models/workspace";
import { castToType } from "@/utils/types"; import { castToType } from "@/utils/types";
import showToast from "@/utils/toast"; import showToast from "@/utils/toast";
import { useEffect, useRef, useState } from "react"; import { useEffect, useRef, useState } from "react";
import VectorCount from "./VectorCount";
import WorkspaceName from "./WorkspaceName"; import WorkspaceName from "./WorkspaceName";
import SuggestedChatMessages from "./SuggestedChatMessages"; import SuggestedChatMessages from "./SuggestedChatMessages";
import DeleteWorkspace from "./DeleteWorkspace"; import DeleteWorkspace from "./DeleteWorkspace";
@ -51,7 +50,6 @@ export default function GeneralInfo({ slug }) {
onSubmit={handleUpdate} onSubmit={handleUpdate}
className="w-1/2 flex flex-col gap-y-6" className="w-1/2 flex flex-col gap-y-6"
> >
<VectorCount reload={true} workspace={workspace} />
<WorkspaceName <WorkspaceName
key={workspace.slug} key={workspace.slug}
workspace={workspace} workspace={workspace}

View File

@ -28,9 +28,6 @@ export default function VectorCount({ reload, workspace }) {
return ( return (
<div> <div>
<h3 className="input-label">Number of vectors</h3> <h3 className="input-label">Number of vectors</h3>
<p className="text-white text-opacity-60 text-xs font-medium py-1">
Total number of vectors in your vector database.
</p>
<p className="text-white text-opacity-60 text-sm font-medium"> <p className="text-white text-opacity-60 text-sm font-medium">
{totalVectors} {totalVectors}
</p> </p>

View File

@ -6,6 +6,7 @@ import VectorDBIdentifier from "./VectorDBIdentifier";
import MaxContextSnippets from "./MaxContextSnippets"; import MaxContextSnippets from "./MaxContextSnippets";
import DocumentSimilarityThreshold from "./DocumentSimilarityThreshold"; import DocumentSimilarityThreshold from "./DocumentSimilarityThreshold";
import ResetDatabase from "./ResetDatabase"; import ResetDatabase from "./ResetDatabase";
import VectorCount from "./VectorCount";
export default function VectorDatabase({ workspace }) { export default function VectorDatabase({ workspace }) {
const [hasChanges, setHasChanges] = useState(false); const [hasChanges, setHasChanges] = useState(false);
@ -38,7 +39,10 @@ export default function VectorDatabase({ workspace }) {
onSubmit={handleUpdate} onSubmit={handleUpdate}
className="w-1/2 flex flex-col gap-y-6" className="w-1/2 flex flex-col gap-y-6"
> >
<VectorDBIdentifier workspace={workspace} /> <div className="flex items-start gap-x-5">
<VectorDBIdentifier workspace={workspace} />
<VectorCount reload={true} workspace={workspace} />
</div>
<MaxContextSnippets workspace={workspace} setHasChanges={setHasChanges} /> <MaxContextSnippets workspace={workspace} setHasChanges={setHasChanges} />
<DocumentSimilarityThreshold <DocumentSimilarityThreshold
workspace={workspace} workspace={workspace}

View File

@ -121,6 +121,10 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
# COHERE_API_KEY= # COHERE_API_KEY=
# EMBEDDING_MODEL_PREF='embed-english-v3.0' # EMBEDDING_MODEL_PREF='embed-english-v3.0'
# EMBEDDING_ENGINE='voyageai'
# VOYAGEAI_API_KEY=
# EMBEDDING_MODEL_PREF='voyage-large-2-instruct'
########################################### ###########################################
######## Vector Database Selection ######## ######## Vector Database Selection ########
########################################### ###########################################

5
server/.gitignore vendored
View File

@ -18,4 +18,7 @@ public/
# For legacy copies of repo # For legacy copies of repo
documents documents
vector-cache vector-cache
yarn-error.log yarn-error.log
# Local SSL Certs for HTTPS
sslcert

View File

@ -447,6 +447,76 @@ function apiWorkspaceEndpoints(app) {
} }
); );
app.post(
  "/v1/workspace/:slug/update-pin",
  [validApiKey],
  async (request, response) => {
    /*
    #swagger.tags = ['Workspaces']
    #swagger.description = 'Add or remove pin from a document in a workspace by its unique slug.'
    #swagger.path = '/v1/workspace/{slug}/update-pin'
    #swagger.parameters['slug'] = {
        in: 'path',
        description: 'Unique slug of workspace to find',
        required: true,
        type: 'string'
    }
    #swagger.requestBody = {
      description: 'JSON object with the document path and pin status to update.',
      required: true,
      type: 'object',
      content: {
        "application/json": {
          example: {
            docPath: "custom-documents/my-pdf.pdf-hash.json",
            pinStatus: true
          }
        }
      }
    }
    #swagger.responses[200] = {
      description: 'OK',
      content: {
        "application/json": {
          schema: {
            type: 'object',
            example: {
              message: 'Pin status updated successfully'
            }
          }
        }
      }
    }
    #swagger.responses[404] = {
      description: 'Document not found'
    }
    #swagger.responses[500] = {
      description: 'Internal Server Error'
    }
    */
    try {
      const { slug = null } = request.params;
      const { docPath, pinStatus = false } = reqBody(request);
      const workspace = await Workspace.get({ slug });

      // A bad slug must be a 404, not a null dereference on `workspace.id`
      // below that would surface as a 500.
      if (!workspace) return response.sendStatus(404).end();

      const document = await Document.get({
        workspaceId: workspace.id,
        docpath: docPath,
      });
      if (!document) return response.sendStatus(404).end();

      await Document.update(document.id, { pinned: pinStatus });
      return response
        .status(200)
        .json({ message: "Pin status updated successfully" })
        .end();
    } catch (error) {
      console.error("Error processing the pin status update:", error);
      return response.status(500).end();
    }
  }
);
app.post( app.post(
"/v1/workspace/:slug/chat", "/v1/workspace/:slug/chat",
[validApiKey], [validApiKey],

View File

@ -36,7 +36,12 @@ app.use(
}) })
); );
require("express-ws")(app); if (!!process.env.ENABLE_HTTPS) {
bootSSL(app, process.env.SERVER_PORT || 3001);
} else {
require("express-ws")(app); // load WebSockets in non-SSL mode.
}
app.use("/api", apiRouter); app.use("/api", apiRouter);
systemEndpoints(apiRouter); systemEndpoints(apiRouter);
extensionEndpoints(apiRouter); extensionEndpoints(apiRouter);
@ -109,8 +114,6 @@ app.all("*", function (_, response) {
response.sendStatus(404); response.sendStatus(404);
}); });
if (!!process.env.ENABLE_HTTPS) { // In non-https mode we need to boot at the end since the server has not yet
bootSSL(app, process.env.SERVER_PORT || 3001); // started and is `.listen`ing.
} else { if (!process.env.ENABLE_HTTPS) bootHTTP(app, process.env.SERVER_PORT || 3001);
bootHTTP(app, process.env.SERVER_PORT || 3001);
}

View File

@ -150,6 +150,8 @@ const SystemSettings = {
// - then it can be shared. // - then it can be shared.
// -------------------------------------------------------- // --------------------------------------------------------
WhisperProvider: process.env.WHISPER_PROVIDER || "local", WhisperProvider: process.env.WHISPER_PROVIDER || "local",
WhisperModelPref:
process.env.WHISPER_MODEL_PREF || "Xenova/whisper-small",
// -------------------------------------------------------- // --------------------------------------------------------
// TTS/STT Selection Settings & Configs // TTS/STT Selection Settings & Configs
@ -424,6 +426,9 @@ const SystemSettings = {
// Cohere API Keys // Cohere API Keys
CohereApiKey: !!process.env.COHERE_API_KEY, CohereApiKey: !!process.env.COHERE_API_KEY,
CohereModelPref: process.env.COHERE_MODEL_PREF, CohereModelPref: process.env.COHERE_MODEL_PREF,
// VoyageAi API Keys
VoyageAiApiKey: !!process.env.VOYAGEAI_API_KEY,
}; };
}, },

View File

@ -2000,6 +2000,69 @@
} }
} }
}, },
    "/v1/workspace/{slug}/update-pin": {
"post": {
"tags": [
"Workspaces"
],
"description": "Add or remove pin from a document in a workspace by its unique slug.",
"parameters": [
{
"name": "slug",
"in": "path",
"required": true,
"schema": {
"type": "string"
},
"description": "Unique slug of workspace to find"
},
{
"name": "Authorization",
"in": "header",
"schema": {
"type": "string"
}
}
],
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"type": "object",
"example": {
"message": "Pin status updated successfully"
}
}
}
}
},
"403": {
"description": "Forbidden"
},
"404": {
"description": "Document not found"
},
"500": {
"description": "Internal Server Error"
}
},
"requestBody": {
"description": "JSON object with the document path and pin status to update.",
"required": true,
"type": "object",
"content": {
"application/json": {
"example": {
"docPath": "custom-documents/my-pdf.pdf-hash.json",
"pinStatus": true
}
}
}
}
}
},
"/v1/workspace/{slug}/chat": { "/v1/workspace/{slug}/chat": {
"post": { "post": {
"tags": [ "tags": [

View File

@ -2,6 +2,7 @@ const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { const {
handleDefaultStreamResponseV2, handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses"); } = require("../../helpers/chat/responses");
const { toValidNumber } = require("../../http");
class GenericOpenAiLLM { class GenericOpenAiLLM {
constructor(embedder = null, modelPreference = null) { constructor(embedder = null, modelPreference = null) {
@ -18,7 +19,9 @@ class GenericOpenAiLLM {
}); });
this.model = this.model =
modelPreference ?? process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null; modelPreference ?? process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024; this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS
? toValidNumber(process.env.GENERIC_OPEN_AI_MAX_TOKENS, 1024)
: 1024;
if (!this.model) if (!this.model)
throw new Error("GenericOpenAI must have a valid model set."); throw new Error("GenericOpenAI must have a valid model set.");
this.limits = { this.limits = {

View File

@ -0,0 +1,45 @@
class VoyageAiEmbedder {
constructor() {
if (!process.env.VOYAGEAI_API_KEY)
throw new Error("No Voyage AI API key was set.");
const {
VoyageEmbeddings,
} = require("@langchain/community/embeddings/voyage");
const voyage = new VoyageEmbeddings({
apiKey: process.env.VOYAGEAI_API_KEY,
});
this.voyage = voyage;
this.model = process.env.EMBEDDING_MODEL_PREF || "voyage-large-2-instruct";
// Limit of how many strings we can process in a single pass to stay with resource or network limits
this.batchSize = 128; // Voyage AI's limit per request is 128 https://docs.voyageai.com/docs/rate-limits#use-larger-batches
this.embeddingMaxChunkLength = 4000; // https://docs.voyageai.com/docs/embeddings - assume a token is roughly 4 letters with some padding
}
async embedTextInput(textInput) {
const result = await this.voyage.embedDocuments(
Array.isArray(textInput) ? textInput : [textInput],
{ modelName: this.model }
);
return result || [];
}
async embedChunks(textChunks = []) {
try {
const embeddings = await this.voyage.embedDocuments(textChunks, {
modelName: this.model,
batchSize: this.batchSize,
});
return embeddings;
} catch (error) {
console.error("Voyage AI Failed to embed:", error);
throw error;
}
}
}
module.exports = {
VoyageAiEmbedder,
};

View File

@ -1,6 +1,5 @@
const { Document } = require("../../../../models/documents"); const { Document } = require("../../../../models/documents");
const { safeJsonParse } = require("../../../http"); const { safeJsonParse } = require("../../../http");
const { validate } = require("uuid");
const { summarizeContent } = require("../utils/summarize"); const { summarizeContent } = require("../utils/summarize");
const Provider = require("../providers/ai-provider"); const Provider = require("../providers/ai-provider");

View File

@ -2,6 +2,7 @@ const OpenAI = require("openai");
const Provider = require("./ai-provider.js"); const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js"); const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js"); const UnTooled = require("./helpers/untooled.js");
const { toValidNumber } = require("../../../http/index.js");
/** /**
* The agent provider for the Generic OpenAI provider. * The agent provider for the Generic OpenAI provider.
@ -24,7 +25,9 @@ class GenericOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
this._client = client; this._client = client;
this.model = model; this.model = model;
this.verbose = true; this.verbose = true;
this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024; this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS
? toValidNumber(process.env.GENERIC_OPEN_AI_MAX_TOKENS, 1024)
: 1024;
} }
get client() { get client() {

View File

@ -12,16 +12,18 @@ function bootSSL(app, port = 3001) {
const privateKey = fs.readFileSync(process.env.HTTPS_KEY_PATH); const privateKey = fs.readFileSync(process.env.HTTPS_KEY_PATH);
const certificate = fs.readFileSync(process.env.HTTPS_CERT_PATH); const certificate = fs.readFileSync(process.env.HTTPS_CERT_PATH);
const credentials = { key: privateKey, cert: certificate }; const credentials = { key: privateKey, cert: certificate };
const server = https.createServer(credentials, app);
https server
.createServer(credentials, app)
.listen(port, async () => { .listen(port, async () => {
await setupTelemetry(); await setupTelemetry();
new CommunicationKey(true); new CommunicationKey(true);
console.log(`Primary server in HTTPS mode listening on port ${port}`); console.log(`Primary server in HTTPS mode listening on port ${port}`);
}) })
.on("error", catchSigTerms); .on("error", catchSigTerms);
return app;
require("express-ws")(app, server); // Apply same certificate + server for WSS connections
return { app, server };
} catch (e) { } catch (e) {
console.error( console.error(
`\x1b[31m[SSL BOOT FAILED]\x1b[0m ${e.message} - falling back to HTTP boot.`, `\x1b[31m[SSL BOOT FAILED]\x1b[0m ${e.message} - falling back to HTTP boot.`,
@ -46,7 +48,8 @@ function bootHTTP(app, port = 3001) {
console.log(`Primary server in HTTP mode listening on port ${port}`); console.log(`Primary server in HTTP mode listening on port ${port}`);
}) })
.on("error", catchSigTerms); .on("error", catchSigTerms);
return app;
return { app, server: null };
} }
function catchSigTerms() { function catchSigTerms() {

View File

@ -29,6 +29,7 @@ async function streamChatWithForEmbed(
const uuid = uuidv4(); const uuid = uuidv4();
const LLMConnector = getLLMProvider({ const LLMConnector = getLLMProvider({
provider: embed?.workspace?.chatProvider,
model: chatModel ?? embed.workspace?.chatModel, model: chatModel ?? embed.workspace?.chatModel,
}); });
const VectorDb = getVectorDbClass(); const VectorDb = getVectorDbClass();

View File

@ -17,6 +17,7 @@ class CollectorApi {
#attachOptions() { #attachOptions() {
return { return {
whisperProvider: process.env.WHISPER_PROVIDER || "local", whisperProvider: process.env.WHISPER_PROVIDER || "local",
WhisperModelPref: process.env.WHISPER_MODEL_PREF,
openAiKey: process.env.OPEN_AI_KEY || null, openAiKey: process.env.OPEN_AI_KEY || null,
}; };
} }

View File

@ -125,6 +125,9 @@ function getEmbeddingEngineSelection() {
case "cohere": case "cohere":
const { CohereEmbedder } = require("../EmbeddingEngines/cohere"); const { CohereEmbedder } = require("../EmbeddingEngines/cohere");
return new CohereEmbedder(); return new CohereEmbedder();
case "voyageai":
const { VoyageAiEmbedder } = require("../EmbeddingEngines/voyageAi");
return new VoyageAiEmbedder();
default: default:
return new NativeEmbedder(); return new NativeEmbedder();
} }

View File

@ -350,12 +350,23 @@ const KEY_MAPPING = {
checks: [isNotEmpty], checks: [isNotEmpty],
}, },
// VoyageAi Options
VoyageAiApiKey: {
envKey: "VOYAGEAI_API_KEY",
checks: [isNotEmpty],
},
// Whisper (transcription) providers // Whisper (transcription) providers
WhisperProvider: { WhisperProvider: {
envKey: "WHISPER_PROVIDER", envKey: "WHISPER_PROVIDER",
checks: [isNotEmpty, supportedTranscriptionProvider], checks: [isNotEmpty, supportedTranscriptionProvider],
postUpdate: [], postUpdate: [],
}, },
WhisperModelPref: {
envKey: "WHISPER_MODEL_PREF",
checks: [validLocalWhisper],
postUpdate: [],
},
// System Settings // System Settings
AuthToken: { AuthToken: {
@ -468,6 +479,16 @@ function supportedTTSProvider(input = "") {
return validSelection ? null : `${input} is not a valid TTS provider.`; return validSelection ? null : `${input} is not a valid TTS provider.`;
} }
/**
 * Validate that a requested local Whisper model is one we support.
 * @param {string} input - model identifier, e.g. "Xenova/whisper-small"
 * @returns {string|null} null when valid, otherwise an error message
 */
function validLocalWhisper(input = "") {
  const supportedModels = ["Xenova/whisper-small", "Xenova/whisper-large"];
  if (supportedModels.includes(input)) return null;
  return `${input} is not a valid Whisper model selection.`;
}
function supportedLLM(input = "") { function supportedLLM(input = "") {
const validSelection = [ const validSelection = [
"openai", "openai",
@ -530,6 +551,7 @@ function supportedEmbeddingModel(input = "") {
"ollama", "ollama",
"lmstudio", "lmstudio",
"cohere", "cohere",
"voyageai",
]; ];
return supported.includes(input) return supported.includes(input)
? null ? null

View File

@ -91,6 +91,11 @@ function isValidUrl(urlString = "") {
return false; return false;
} }
/**
 * Coerce an arbitrary value (typically an env-var string) to a number,
 * returning `fallback` when it cannot be parsed.
 * @param {*} number - candidate value to coerce
 * @param {*} fallback - value returned when `number` is nullish or non-numeric
 * @returns {number|*} the parsed number, or `fallback`
 */
function toValidNumber(number = null, fallback = null) {
  // Nullish input has no meaningful numeric value - Number(null) would
  // silently coerce to 0 and bypass the fallback the caller asked for.
  if (number === null || number === undefined) return fallback;
  const parsed = Number(number);
  return Number.isNaN(parsed) ? fallback : parsed;
}
module.exports = { module.exports = {
reqBody, reqBody,
multiUserMode, multiUserMode,
@ -101,4 +106,5 @@ module.exports = {
parseAuthHeader, parseAuthHeader,
safeJsonParse, safeJsonParse,
isValidUrl, isValidUrl,
toValidNumber,
}; };