Mirror of https://github.com/Mintplex-Labs/anything-llm.git (synced 2024-11-13 02:00:10 +01:00)

Merge branch 'master' into 1347-human-readable-confluence-url

This commit is contained in commit cf3bfc5293.
@@ -1,19 +1,23 @@
const fs = require("fs");
const path = require("path");
const { v4 } = require("uuid");
const defaultWhisper = "Xenova/whisper-small"; // Model Card: https://huggingface.co/Xenova/whisper-small
const fileSize = {
  "Xenova/whisper-small": "250mb",
  "Xenova/whisper-large": "1.56GB",
};

class LocalWhisper {
  constructor() {
    // Model Card: https://huggingface.co/Xenova/whisper-small
    this.model = "Xenova/whisper-small";
  constructor({ options }) {
    this.model = options?.WhisperModelPref ?? defaultWhisper;
    this.fileSize = fileSize[this.model];
    this.cacheDir = path.resolve(
      process.env.STORAGE_DIR
        ? path.resolve(process.env.STORAGE_DIR, `models`)
        : path.resolve(__dirname, `../../../server/storage/models`)
    );

    this.modelPath = path.resolve(this.cacheDir, "Xenova", "whisper-small");

    this.modelPath = path.resolve(this.cacheDir, ...this.model.split("/"));
    // Make directory when it does not exist in existing installations
    if (!fs.existsSync(this.cacheDir))
      fs.mkdirSync(this.cacheDir, { recursive: true });
@@ -104,7 +108,7 @@ class LocalWhisper {
  async client() {
    if (!fs.existsSync(this.modelPath)) {
      this.#log(
        `The native whisper model has never been run and will be downloaded right now. Subsequent runs will be faster. (~250MB)`
        `The native whisper model has never been run and will be downloaded right now. Subsequent runs will be faster. (~${this.fileSize})`
      );
    }
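For context, a minimal sketch of how the refactored constructor resolves the whisper model once the collector receives its attached options. This is illustrative only; the "options" object and "WhisperModelPref" key mirror what the diff above wires up, while the helper function and sample values are assumptions for the example.

// Illustrative only: resolve the whisper model the same way the new constructor does.
const defaultWhisper = "Xenova/whisper-small";
const fileSize = {
  "Xenova/whisper-small": "250mb",
  "Xenova/whisper-large": "1.56GB",
};

function resolveWhisperModel(options = {}) {
  // Falls back to the small model when no preference has been saved.
  const model = options?.WhisperModelPref ?? defaultWhisper;
  return { model, downloadSize: fileSize[model] };
}

console.log(resolveWhisperModel({ WhisperModelPref: "Xenova/whisper-large" }));
// { model: 'Xenova/whisper-large', downloadSize: '1.56GB' }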
@@ -124,6 +124,10 @@ GID='1000'
# COHERE_API_KEY=
# EMBEDDING_MODEL_PREF='embed-english-v3.0'

# EMBEDDING_ENGINE='voyageai'
# VOYAGEAI_API_KEY=
# EMBEDDING_MODEL_PREF='voyage-large-2-instruct'

###########################################
######## Vector Database Selection ########
###########################################
frontend/.gitignore (vendored): 2 lines changed
@@ -9,10 +9,8 @@ lerna-debug.log*

node_modules
dist
lib
dist-ssr
*.local
!frontend/components/lib

# Editor directories and files
.vscode/*
@@ -0,0 +1,50 @@
export default function VoyageAiOptions({ settings }) {
  return (
    <div className="w-full flex flex-col gap-y-4">
      <div className="w-full flex items-center gap-4">
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-4">
            API Key
          </label>
          <input
            type="password"
            name="VoyageAiApiKey"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
            placeholder="Voyage AI API Key"
            defaultValue={settings?.VoyageAiApiKey ? "*".repeat(20) : ""}
            required={true}
            autoComplete="off"
            spellCheck={false}
          />
        </div>
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-4">
            Model Preference
          </label>
          <select
            name="EmbeddingModelPref"
            required={true}
            defaultValue={settings?.EmbeddingModelPref}
            className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
          >
            <optgroup label="Available embedding models">
              {[
                "voyage-large-2-instruct",
                "voyage-law-2",
                "voyage-code-2",
                "voyage-large-2",
                "voyage-2",
              ].map((model) => {
                return (
                  <option key={model} value={model}>
                    {model}
                  </option>
                );
              })}
            </optgroup>
          </select>
        </div>
      </div>
    </div>
  );
}
@@ -14,6 +14,8 @@ import {
import React, { useEffect, useState } from "react";
import SettingsButton from "../SettingsButton";
import { isMobile } from "react-device-detect";
import { Tooltip } from "react-tooltip";
import { v4 } from "uuid";

export const MAX_ICONS = 3;
export const ICON_COMPONENTS = {
@@ -47,36 +49,48 @@ export default function Footer() {
  return (
    <div className="flex justify-center mb-2">
      <div className="flex space-x-4">
        <a
          href={paths.github()}
          target="_blank"
          rel="noreferrer"
          className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
          aria-label="Find us on Github"
        >
          <GithubLogo weight="fill" className="h-5 w-5 " />
        </a>
        <a
          href={paths.docs()}
          target="_blank"
          rel="noreferrer"
          className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
          aria-label="Docs"
        >
          <BookOpen weight="fill" className="h-5 w-5 " />
        </a>
        <a
          href={paths.discord()}
          target="_blank"
          rel="noreferrer"
          className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
          aria-label="Join our Discord server"
        >
          <DiscordLogo
            weight="fill"
            className="h-5 w-5 stroke-slate-200 group-hover:stroke-slate-200"
          />
        </a>
        <ToolTipWrapper id="open-github">
          <a
            href={paths.github()}
            target="_blank"
            rel="noreferrer"
            className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
            aria-label="Find us on Github"
            data-tooltip-id="open-github"
            data-tooltip-content="View source code on Github"
          >
            <GithubLogo weight="fill" className="h-5 w-5 " />
          </a>
        </ToolTipWrapper>
        <ToolTipWrapper id="open-documentation">
          <a
            href={paths.docs()}
            target="_blank"
            rel="noreferrer"
            className="w-fit transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
            aria-label="Docs"
            data-tooltip-id="open-documentation"
            data-tooltip-content="Open AnythingLLM help docs"
          >
            <BookOpen weight="fill" className="h-5 w-5 " />
          </a>
        </ToolTipWrapper>
        <ToolTipWrapper id="open-discord">
          <a
            href={paths.discord()}
            target="_blank"
            rel="noreferrer"
            className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
            aria-label="Join our Discord server"
            data-tooltip-id="open-discord"
            data-tooltip-content="Join the AnythingLLM Discord"
          >
            <DiscordLogo
              weight="fill"
              className="h-5 w-5 stroke-slate-200 group-hover:stroke-slate-200"
            />
          </a>
        </ToolTipWrapper>
        {!isMobile && <SettingsButton />}
      </div>
    </div>
@@ -105,3 +119,17 @@ export default function Footer() {
    </div>
  );
}

export function ToolTipWrapper({ id = v4(), children }) {
  return (
    <div className="flex w-fit">
      {children}
      <Tooltip
        id={id}
        place="top"
        delayShow={300}
        className="tooltip !text-xs z-99"
      />
    </div>
  );
}
@@ -1,80 +1,84 @@
export default function GenericOpenAiOptions({ settings }) {
  return (
    <div className="flex gap-4 flex-wrap">
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-4">
          Base URL
        </label>
        <input
          type="url"
          name="GenericOpenAiBasePath"
          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
          placeholder="eg: https://proxy.openai.com"
          defaultValue={settings?.GenericOpenAiBasePath}
          required={true}
          autoComplete="off"
          spellCheck={false}
        />
    <div className="flex flex-col gap-y-4">
      <div className="flex gap-4 flex-wrap">
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-4">
            Base URL
          </label>
          <input
            type="url"
            name="GenericOpenAiBasePath"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
            placeholder="eg: https://proxy.openai.com"
            defaultValue={settings?.GenericOpenAiBasePath}
            required={true}
            autoComplete="off"
            spellCheck={false}
          />
        </div>
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-4">
            API Key
          </label>
          <input
            type="password"
            name="GenericOpenAiKey"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
            placeholder="Generic service API Key"
            defaultValue={settings?.GenericOpenAiKey ? "*".repeat(20) : ""}
            required={false}
            autoComplete="off"
            spellCheck={false}
          />
        </div>
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-4">
            Chat Model Name
          </label>
          <input
            type="text"
            name="GenericOpenAiModelPref"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
            placeholder="Model id used for chat requests"
            defaultValue={settings?.GenericOpenAiModelPref}
            required={true}
            autoComplete="off"
          />
        </div>
      </div>
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-4">
          API Key
        </label>
        <input
          type="password"
          name="GenericOpenAiKey"
          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
          placeholder="Generic service API Key"
          defaultValue={settings?.GenericOpenAiKey ? "*".repeat(20) : ""}
          required={false}
          autoComplete="off"
          spellCheck={false}
        />
      </div>
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-4">
          Chat Model Name
        </label>
        <input
          type="text"
          name="GenericOpenAiModelPref"
          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
          placeholder="Model id used for chat requests"
          defaultValue={settings?.GenericOpenAiModelPref}
          required={true}
          autoComplete="off"
        />
      </div>
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-4">
          Token context window
        </label>
        <input
          type="number"
          name="GenericOpenAiTokenLimit"
          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
          placeholder="Content window limit (eg: 4096)"
          min={1}
          onScroll={(e) => e.target.blur()}
          defaultValue={settings?.GenericOpenAiTokenLimit}
          required={true}
          autoComplete="off"
        />
      </div>
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-4">
          Max Tokens
        </label>
        <input
          type="number"
          name="GenericOpenAiMaxTokens"
          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
          placeholder="Max tokens per request (eg: 1024)"
          min={1}
          defaultValue={settings?.GenericOpenAiMaxTokens || 1024}
          required={true}
          autoComplete="off"
        />
      <div className="flex gap-x-4 flex-wrap">
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-4">
            Token context window
          </label>
          <input
            type="number"
            name="GenericOpenAiTokenLimit"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
            placeholder="Content window limit (eg: 4096)"
            min={1}
            onScroll={(e) => e.target.blur()}
            defaultValue={settings?.GenericOpenAiTokenLimit}
            required={true}
            autoComplete="off"
          />
        </div>
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-4">
            Max Tokens
          </label>
          <input
            type="number"
            name="GenericOpenAiMaxTokens"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
            placeholder="Max tokens per request (eg: 1024)"
            min={1}
            defaultValue={settings?.GenericOpenAiMaxTokens || 1024}
            required={true}
            autoComplete="off"
          />
        </div>
      </div>
    </div>
  );
@@ -3,7 +3,7 @@ import System from "@/models/system";
import showToast from "@/utils/toast";
import pluralize from "pluralize";
import { TagsInput } from "react-tag-input-component";
import { Warning } from "@phosphor-icons/react";
import { Info, Warning } from "@phosphor-icons/react";
import { Tooltip } from "react-tooltip";

const DEFAULT_BRANCHES = ["main", "master"];
@@ -92,45 +92,7 @@ export default function GithubOptions() {
                <p className="font-bold text-white">Github Access Token</p>{" "}
                <p className="text-xs text-white/50 font-light flex items-center">
                  optional
                  {!accessToken && (
                    <Warning
                      size={14}
                      className="ml-1 text-orange-500 cursor-pointer"
                      data-tooltip-id="access-token-tooltip"
                      data-tooltip-place="right"
                    />
                  )}
                  <Tooltip
                    delayHide={300}
                    id="access-token-tooltip"
                    className="max-w-xs"
                    clickable={true}
                  >
                    <p className="text-sm">
                      Without a{" "}
                      <a
                        href="https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens"
                        rel="noreferrer"
                        target="_blank"
                        className="underline"
                        onClick={(e) => e.stopPropagation()}
                      >
                        Personal Access Token
                      </a>
                      , the GitHub API may limit the number of files that
                      can be collected due to rate limits. You can{" "}
                      <a
                        href="https://github.com/settings/personal-access-tokens/new"
                        rel="noreferrer"
                        target="_blank"
                        className="underline"
                        onClick={(e) => e.stopPropagation()}
                      >
                        create a temporary Access Token
                      </a>{" "}
                      to avoid this issue.
                    </p>
                  </Tooltip>
                  <PATTooltip accessToken={accessToken} />
                </p>
              </label>
              <p className="text-xs font-normal text-white/50">
@@ -180,6 +142,7 @@ export default function GithubOptions() {
        </div>

        <div className="flex flex-col gap-y-2 w-full pr-10">
          <PATAlert accessToken={accessToken} />
          <button
            type="submit"
            disabled={loading}
@@ -269,3 +232,78 @@ function GitHubBranchSelection({ repo, accessToken }) {
    </div>
  );
}

function PATAlert({ accessToken }) {
  if (!!accessToken) return null;
  return (
    <div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-4 bg-blue-800/30 w-fit rounded-lg px-4 py-2">
      <div className="gap-x-2 flex items-center">
        <Info className="shrink-0" size={25} />
        <p className="text-sm">
          Without filling out the <b>Github Access Token</b> this data connector
          will only be able to collect the <b>top-level</b> files of the repo
          due to GitHub's public API rate-limits.
          <br />
          <br />
          <a
            href="https://github.com/settings/personal-access-tokens/new"
            rel="noreferrer"
            target="_blank"
            className="underline"
            onClick={(e) => e.stopPropagation()}
          >
            {" "}
            Get a free Personal Access Token with a GitHub account here.
          </a>
        </p>
      </div>
    </div>
  );
}

function PATTooltip({ accessToken }) {
  if (!!accessToken) return null;
  return (
    <>
      {!accessToken && (
        <Warning
          size={14}
          className="ml-1 text-orange-500 cursor-pointer"
          data-tooltip-id="access-token-tooltip"
          data-tooltip-place="right"
        />
      )}
      <Tooltip
        delayHide={300}
        id="access-token-tooltip"
        className="max-w-xs"
        clickable={true}
      >
        <p className="text-sm">
          Without a{" "}
          <a
            href="https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens"
            rel="noreferrer"
            target="_blank"
            className="underline"
            onClick={(e) => e.stopPropagation()}
          >
            Personal Access Token
          </a>
          , the GitHub API may limit the number of files that can be collected
          due to rate limits. You can{" "}
          <a
            href="https://github.com/settings/personal-access-tokens/new"
            rel="noreferrer"
            target="_blank"
            className="underline"
            onClick={(e) => e.stopPropagation()}
          >
            create a temporary Access Token
          </a>{" "}
          to avoid this issue.
        </p>
      </Tooltip>
    </>
  );
}
@@ -3,6 +3,7 @@ import paths from "@/utils/paths";
import { ArrowUUpLeft, Wrench } from "@phosphor-icons/react";
import { Link } from "react-router-dom";
import { useMatch } from "react-router-dom";
import { ToolTipWrapper } from "../Footer";

export default function SettingsButton() {
  const isInSettings = !!useMatch("/settings/*");
@@ -12,22 +13,32 @@ export default function SettingsButton() {

  if (isInSettings)
    return (
      <Link
        to={paths.home()}
        className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
        aria-label="Home"
      >
        <ArrowUUpLeft className="h-5 w-5" weight="fill" />
      </Link>
      <ToolTipWrapper id="go-home">
        <Link
          to={paths.home()}
          className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
          aria-label="Home"
          data-tooltip-id="go-home"
          data-tooltip-content="Back to workspaces"
        >
          <ArrowUUpLeft className="h-5 w-5" weight="fill" />
        </Link>
      </ToolTipWrapper>
    );

  return (
    <Link
      to={!!user?.role ? paths.settings.system() : paths.settings.appearance()}
      className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
      aria-label="Settings"
    >
      <Wrench className="h-5 w-5" weight="fill" />
    </Link>
    <ToolTipWrapper id="open-settings">
      <Link
        to={
          !!user?.role ? paths.settings.system() : paths.settings.appearance()
        }
        className="transition-all duration-300 p-2 rounded-full text-white bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
        aria-label="Settings"
        data-tooltip-id="open-settings"
        data-tooltip-content="Open settings"
      >
        <Wrench className="h-5 w-5" weight="fill" />
      </Link>
    </ToolTipWrapper>
  );
}
@@ -329,7 +329,7 @@ const SidebarOptions = ({ user = null }) => (
    <Option
      href={paths.settings.embedSetup()}
      childLinks={[paths.settings.embedChats()]}
      btnText="Embedded Chat"
      btnText="Chat Embed Widgets"
      icon={<CodeBlock className="h-5 w-5 flex-shrink-0" />}
      user={user}
      flex={true}
@@ -338,7 +338,7 @@ const SidebarOptions = ({ user = null }) => (
        <>
          <Option
            href={paths.settings.embedChats()}
            btnText="Embedded Chat History"
            btnText="Chat Embed History"
            icon={<Barcode className="h-5 w-5 flex-shrink-0" />}
            user={user}
            flex={true}
@@ -1,38 +1,89 @@
import { Gauge } from "@phosphor-icons/react";
export default function NativeTranscriptionOptions() {
import { useState } from "react";

export default function NativeTranscriptionOptions({ settings }) {
  const [model, setModel] = useState(settings?.WhisperModelPref);

  return (
    <div className="w-full flex flex-col gap-y-4">
      <div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-4 bg-blue-800/30 w-fit rounded-lg px-4 py-2">
        <div className="gap-x-2 flex items-center">
          <Gauge size={25} />
          <p className="text-sm">
            Using the local whisper model on machines with limited RAM or CPU
            can stall AnythingLLM when processing media files.
            <br />
            We recommend at least 2GB of RAM and upload files <10Mb.
            <br />
            <br />
            <i>
              The built-in model will automatically download on the first use.
            </i>
          </p>
        </div>
      </div>
      <LocalWarning model={model} />
      <div className="w-full flex items-center gap-4">
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-4">
            Model Selection
          </label>
          <select
            disabled={true}
            name="WhisperModelPref"
            defaultValue={model}
            onChange={(e) => setModel(e.target.value)}
            className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
          >
            <option disabled={true} selected={true}>
              Xenova/whisper-small
            </option>
            {["Xenova/whisper-small", "Xenova/whisper-large"].map(
              (value, i) => {
                return (
                  <option key={i} value={value}>
                    {value}
                  </option>
                );
              }
            )}
          </select>
        </div>
      </div>
    </div>
  );
}

function LocalWarning({ model }) {
  switch (model) {
    case "Xenova/whisper-small":
      return <WhisperSmall />;
    case "Xenova/whisper-large":
      return <WhisperLarge />;
    default:
      return <WhisperSmall />;
  }
}

function WhisperSmall() {
  return (
    <div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-4 bg-blue-800/30 w-fit rounded-lg px-4 py-2">
      <div className="gap-x-2 flex items-center">
        <Gauge size={25} />
        <p className="text-sm">
          Running the <b>whisper-small</b> model on a machine with limited RAM
          or CPU can stall AnythingLLM when processing media files.
          <br />
          We recommend at least 2GB of RAM and upload files <10Mb.
          <br />
          <br />
          <i>
            This model will automatically download on the first use. (250mb)
          </i>
        </p>
      </div>
    </div>
  );
}

function WhisperLarge() {
  return (
    <div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-4 bg-blue-800/30 w-fit rounded-lg px-4 py-2">
      <div className="gap-x-2 flex items-center">
        <Gauge size={25} />
        <p className="text-sm">
          Using the <b>whisper-large</b> model on machines with limited RAM or
          CPU can stall AnythingLLM when processing media files. This model is
          substantially larger than the whisper-small.
          <br />
          We recommend at least 8GB of RAM and upload files <10Mb.
          <br />
          <br />
          <i>
            This model will automatically download on the first use. (1.56GB)
          </i>
        </p>
      </div>
    </div>
  );
}
@@ -12,6 +12,7 @@ import AvailableAgentsButton, {
} from "./AgentMenu";
import TextSizeButton from "./TextSizeMenu";
import SpeechToText from "./SpeechToText";
import { Tooltip } from "react-tooltip";

export const PROMPT_INPUT_EVENT = "set_prompt_input";
export default function PromptInput({
@@ -134,14 +135,25 @@ export default function PromptInput({
            {buttonDisabled ? (
              <StopGenerationButton />
            ) : (
              <button
                ref={formRef}
                type="submit"
                className="inline-flex justify-center rounded-2xl cursor-pointer text-white/60 hover:text-white group ml-4"
              >
                <PaperPlaneRight className="w-7 h-7 my-3" weight="fill" />
                <span className="sr-only">Send message</span>
              </button>
              <>
                <button
                  ref={formRef}
                  type="submit"
                  className="inline-flex justify-center rounded-2xl cursor-pointer text-white/60 hover:text-white group ml-4"
                  data-tooltip-id="send-prompt"
                  data-tooltip-content="Send prompt message to workspace"
                  aria-label="Send prompt message to workspace"
                >
                  <PaperPlaneRight className="w-7 h-7 my-3" weight="fill" />
                  <span className="sr-only">Send message</span>
                </button>
                <Tooltip
                  id="send-prompt"
                  place="bottom"
                  delayShow={300}
                  className="tooltip !text-xs z-99"
                />
              </>
            )}
          </div>
          <div className="flex justify-between py-3.5">
BIN  frontend/src/media/embeddingprovider/voyageai.png (new file, 20 KiB; binary file not shown)
@@ -10,6 +10,8 @@ import LocalAiLogo from "@/media/llmprovider/localai.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png";

import PreLoader from "@/components/Preloader";
import ChangeWarningModal from "@/components/ChangeWarning";
import OpenAiOptions from "@/components/EmbeddingSelection/OpenAiOptions";
@@ -19,6 +21,7 @@ import NativeEmbeddingOptions from "@/components/EmbeddingSelection/NativeEmbedd
import OllamaEmbeddingOptions from "@/components/EmbeddingSelection/OllamaOptions";
import LMStudioEmbeddingOptions from "@/components/EmbeddingSelection/LMStudioOptions";
import CohereEmbeddingOptions from "@/components/EmbeddingSelection/CohereOptions";
import VoyageAiOptions from "@/components/EmbeddingSelection/VoyageAiOptions";

import EmbedderItem from "@/components/EmbeddingSelection/EmbedderItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -78,6 +81,13 @@ const EMBEDDERS = [
    options: (settings) => <CohereEmbeddingOptions settings={settings} />,
    description: "Run powerful embedding models from Cohere.",
  },
  {
    name: "Voyage AI",
    value: "voyageai",
    logo: VoyageAiLogo,
    options: (settings) => <VoyageAiOptions settings={settings} />,
    description: "Run powerful embedding models from Voyage AI.",
  },
];

export default function GeneralEmbeddingPreference() {
@@ -12,6 +12,23 @@ import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
import CTAButton from "@/components/lib/CTAButton";

const PROVIDERS = [
  {
    name: "OpenAI",
    value: "openai",
    logo: OpenAiLogo,
    options: (settings) => <OpenAiWhisperOptions settings={settings} />,
    description: "Leverage the OpenAI Whisper-large model using your API key.",
  },
  {
    name: "AnythingLLM Built-In",
    value: "local",
    logo: AnythingLLMIcon,
    options: (settings) => <NativeTranscriptionOptions settings={settings} />,
    description: "Run a built-in whisper model on this instance privately.",
  },
];

export default function TranscriptionModelPreference() {
  const [saving, setSaving] = useState(false);
  const [hasChanges, setHasChanges] = useState(false);
@@ -68,24 +85,6 @@ export default function TranscriptionModelPreference() {
    fetchKeys();
  }, []);

  const PROVIDERS = [
    {
      name: "OpenAI",
      value: "openai",
      logo: OpenAiLogo,
      options: <OpenAiWhisperOptions settings={settings} />,
      description:
        "Leverage the OpenAI Whisper-large model using your API key.",
    },
    {
      name: "AnythingLLM Built-In",
      value: "local",
      logo: AnythingLLMIcon,
      options: <NativeTranscriptionOptions settings={settings} />,
      description: "Run a built-in whisper model on this instance privately.",
    },
  ];

  useEffect(() => {
    const filtered = PROVIDERS.filter((provider) =>
      provider.name.toLowerCase().includes(searchQuery.toLowerCase())
@@ -228,7 +227,7 @@ export default function TranscriptionModelPreference() {
                {selectedProvider &&
                  PROVIDERS.find(
                    (provider) => provider.value === selectedProvider
                  )?.options}
                  )?.options(settings)}
              </div>
            </div>
          </form>
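The hunks above move PROVIDERS out of the component to module scope, so each entry's "options" is now a function that receives the fetched settings at render time instead of closing over component state. A minimal sketch of that lookup pattern, with plain strings standing in for the real option components (the sample settings values are assumptions):

// Illustrative only: provider entries expose options as a function of settings.
const PROVIDERS = [
  {
    name: "OpenAI",
    value: "openai",
    // The real code returns <OpenAiWhisperOptions settings={settings} /> here.
    options: (settings) => `OpenAI options (API key ${settings.OpenAiKey ? "set" : "missing"})`,
  },
  {
    name: "AnythingLLM Built-In",
    value: "local",
    options: (settings) => `Local whisper options (model: ${settings.WhisperModelPref})`,
  },
];

const selectedProvider = "local";
const settings = { WhisperModelPref: "Xenova/whisper-small", OpenAiKey: true };
const panel = PROVIDERS.find((p) => p.value === selectedProvider)?.options(settings);
console.log(panel); // "Local whisper options (model: Xenova/whisper-small)"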
@@ -28,6 +28,8 @@ import LanceDbLogo from "@/media/vectordbs/lancedb.png";
import WeaviateLogo from "@/media/vectordbs/weaviate.png";
import QDrantLogo from "@/media/vectordbs/qdrant.png";
import MilvusLogo from "@/media/vectordbs/milvus.png";
import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png";

import React, { useState, useEffect } from "react";
import paths from "@/utils/paths";
import { useNavigate } from "react-router-dom";
@@ -292,6 +294,13 @@ export const EMBEDDING_ENGINE_PRIVACY = {
    ],
    logo: CohereLogo,
  },
  voyageai: {
    name: "Voyage AI",
    description: [
      "Data sent to Voyage AI's servers is shared according to the terms of service of voyageai.com.",
    ],
    logo: VoyageAiLogo,
  },
};

export default function DataHandling({ setHeader, setForwardBtn, setBackBtn }) {
@@ -20,19 +20,23 @@ export default function ChatTemperatureSettings({
          LLM Temperature
        </label>
        <p className="text-white text-opacity-60 text-xs font-medium py-1.5">
          This setting controls how "random" or dynamic your chat
          responses will be.
          This setting controls how "creative" your LLM responses will
          be.
          <br />
          The higher the number (1.0 maximum) the more random and incoherent.
          The higher the number the more creative. For some models this can lead
          to incoherent responses when set too high.
          <br />
          <i>Recommended: {defaults.temp}</i>
          <br />
          <i>
            Most LLMs have various acceptable ranges of valid values. Consult
            your LLM provider for that information.
          </i>
        </p>
      </div>
      <input
        name="openAiTemp"
        type="number"
        min={0.0}
        max={1.0}
        step={0.1}
        onWheel={(e) => e.target.blur()}
        defaultValue={workspace?.openAiTemp ?? defaults.temp}
@@ -2,7 +2,6 @@ import Workspace from "@/models/workspace";
import { castToType } from "@/utils/types";
import showToast from "@/utils/toast";
import { useEffect, useRef, useState } from "react";
import VectorCount from "./VectorCount";
import WorkspaceName from "./WorkspaceName";
import SuggestedChatMessages from "./SuggestedChatMessages";
import DeleteWorkspace from "./DeleteWorkspace";
@@ -51,7 +50,6 @@ export default function GeneralInfo({ slug }) {
        onSubmit={handleUpdate}
        className="w-1/2 flex flex-col gap-y-6"
      >
        <VectorCount reload={true} workspace={workspace} />
        <WorkspaceName
          key={workspace.slug}
          workspace={workspace}
@@ -28,9 +28,6 @@ export default function VectorCount({ reload, workspace }) {
  return (
    <div>
      <h3 className="input-label">Number of vectors</h3>
      <p className="text-white text-opacity-60 text-xs font-medium py-1">
        Total number of vectors in your vector database.
      </p>
      <p className="text-white text-opacity-60 text-sm font-medium">
        {totalVectors}
      </p>
@@ -6,6 +6,7 @@ import VectorDBIdentifier from "./VectorDBIdentifier";
import MaxContextSnippets from "./MaxContextSnippets";
import DocumentSimilarityThreshold from "./DocumentSimilarityThreshold";
import ResetDatabase from "./ResetDatabase";
import VectorCount from "./VectorCount";

export default function VectorDatabase({ workspace }) {
  const [hasChanges, setHasChanges] = useState(false);
@@ -38,7 +39,10 @@ export default function VectorDatabase({ workspace }) {
        onSubmit={handleUpdate}
        className="w-1/2 flex flex-col gap-y-6"
      >
        <VectorDBIdentifier workspace={workspace} />
        <div className="flex items-start gap-x-5">
          <VectorDBIdentifier workspace={workspace} />
          <VectorCount reload={true} workspace={workspace} />
        </div>
        <MaxContextSnippets workspace={workspace} setHasChanges={setHasChanges} />
        <DocumentSimilarityThreshold
          workspace={workspace}
@@ -121,6 +121,10 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
# COHERE_API_KEY=
# EMBEDDING_MODEL_PREF='embed-english-v3.0'

# EMBEDDING_ENGINE='voyageai'
# VOYAGEAI_API_KEY=
# EMBEDDING_MODEL_PREF='voyage-large-2-instruct'

###########################################
######## Vector Database Selection ########
###########################################
server/.gitignore (vendored): 5 lines changed
@@ -18,4 +18,7 @@ public/
# For legacy copies of repo
documents
vector-cache
yarn-error.log
yarn-error.log

# Local SSL Certs for HTTPS
sslcert
@@ -447,6 +447,76 @@ function apiWorkspaceEndpoints(app) {
    }
  );

  app.post(
    "/v1/workspace/:slug/update-pin",
    [validApiKey],
    async (request, response) => {
      /*
      #swagger.tags = ['Workspaces']
      #swagger.description = 'Add or remove pin from a document in a workspace by its unique slug.'
      #swagger.path = '/workspace/{slug}/update-pin'
      #swagger.parameters['slug'] = {
        in: 'path',
        description: 'Unique slug of workspace to find',
        required: true,
        type: 'string'
      }
      #swagger.requestBody = {
        description: 'JSON object with the document path and pin status to update.',
        required: true,
        type: 'object',
        content: {
          "application/json": {
            example: {
              docPath: "custom-documents/my-pdf.pdf-hash.json",
              pinStatus: true
            }
          }
        }
      }
      #swagger.responses[200] = {
        description: 'OK',
        content: {
          "application/json": {
            schema: {
              type: 'object',
              example: {
                message: 'Pin status updated successfully'
              }
            }
          }
        }
      }
      #swagger.responses[404] = {
        description: 'Document not found'
      }
      #swagger.responses[500] = {
        description: 'Internal Server Error'
      }
      */
      try {
        const { slug = null } = request.params;
        const { docPath, pinStatus = false } = reqBody(request);
        const workspace = await Workspace.get({ slug });

        const document = await Document.get({
          workspaceId: workspace.id,
          docpath: docPath,
        });
        if (!document) return response.sendStatus(404).end();

        await Document.update(document.id, { pinned: pinStatus });
        return response
          .status(200)
          .json({ message: "Pin status updated successfully" })
          .end();
      } catch (error) {
        console.error("Error processing the pin status update:", error);
        return response.status(500).end();
      }
    }
  );

  app.post(
    "/v1/workspace/:slug/chat",
    [validApiKey],
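For reference, a sketch of exercising the new update-pin route from a script. The base URL, API key, workspace slug, and docPath below are placeholders; the route is assumed to be served under the same /api/v1 prefix and API-key authorization as the other workspace endpoints in this file.

// Illustrative only: pin a document in a workspace via the new endpoint.
async function updatePin() {
  const response = await fetch(
    "http://localhost:3001/api/v1/workspace/my-workspace/update-pin",
    {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: "Bearer MY-API-KEY", // placeholder API key for the validApiKey middleware
      },
      body: JSON.stringify({
        docPath: "custom-documents/my-pdf.pdf-hash.json",
        pinStatus: true,
      }),
    }
  );
  if (!response.ok) throw new Error(`Pin update failed with status ${response.status}`);
  console.log(await response.json()); // { message: "Pin status updated successfully" }
}

updatePin().catch(console.error);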
@@ -36,7 +36,12 @@ app.use(
  })
);

require("express-ws")(app);
if (!!process.env.ENABLE_HTTPS) {
  bootSSL(app, process.env.SERVER_PORT || 3001);
} else {
  require("express-ws")(app); // load WebSockets in non-SSL mode.
}

app.use("/api", apiRouter);
systemEndpoints(apiRouter);
extensionEndpoints(apiRouter);
@@ -109,8 +114,6 @@ app.all("*", function (_, response) {
  response.sendStatus(404);
});

if (!!process.env.ENABLE_HTTPS) {
  bootSSL(app, process.env.SERVER_PORT || 3001);
} else {
  bootHTTP(app, process.env.SERVER_PORT || 3001);
}
// In non-https mode we need to boot at the end since the server has not yet
// started and is `.listen`ing.
if (!process.env.ENABLE_HTTPS) bootHTTP(app, process.env.SERVER_PORT || 3001);
@@ -150,6 +150,8 @@ const SystemSettings = {
    // - then it can be shared.
    // --------------------------------------------------------
    WhisperProvider: process.env.WHISPER_PROVIDER || "local",
    WhisperModelPref:
      process.env.WHISPER_MODEL_PREF || "Xenova/whisper-small",

    // --------------------------------------------------------
    // TTS/STT Selection Settings & Configs
@@ -424,6 +426,9 @@ const SystemSettings = {
      // Cohere API Keys
      CohereApiKey: !!process.env.COHERE_API_KEY,
      CohereModelPref: process.env.COHERE_MODEL_PREF,

      // VoyageAi API Keys
      VoyageAiApiKey: !!process.env.VOYAGEAI_API_KEY,
    };
  },
@@ -2000,6 +2000,69 @@
        }
      }
    },
    "/workspace/{slug}/update-pin": {
      "post": {
        "tags": [
          "Workspaces"
        ],
        "description": "Add or remove pin from a document in a workspace by its unique slug.",
        "parameters": [
          {
            "name": "slug",
            "in": "path",
            "required": true,
            "schema": {
              "type": "string"
            },
            "description": "Unique slug of workspace to find"
          },
          {
            "name": "Authorization",
            "in": "header",
            "schema": {
              "type": "string"
            }
          }
        ],
        "responses": {
          "200": {
            "description": "OK",
            "content": {
              "application/json": {
                "schema": {
                  "type": "object",
                  "example": {
                    "message": "Pin status updated successfully"
                  }
                }
              }
            }
          },
          "403": {
            "description": "Forbidden"
          },
          "404": {
            "description": "Document not found"
          },
          "500": {
            "description": "Internal Server Error"
          }
        },
        "requestBody": {
          "description": "JSON object with the document path and pin status to update.",
          "required": true,
          "type": "object",
          "content": {
            "application/json": {
              "example": {
                "docPath": "custom-documents/my-pdf.pdf-hash.json",
                "pinStatus": true
              }
            }
          }
        }
      }
    },
    "/v1/workspace/{slug}/chat": {
      "post": {
        "tags": [
@@ -2,6 +2,7 @@ const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
const { toValidNumber } = require("../../http");

class GenericOpenAiLLM {
  constructor(embedder = null, modelPreference = null) {
@@ -18,7 +19,9 @@ class GenericOpenAiLLM {
    });
    this.model =
      modelPreference ?? process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
    this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024;
    this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS
      ? toValidNumber(process.env.GENERIC_OPEN_AI_MAX_TOKENS, 1024)
      : 1024;
    if (!this.model)
      throw new Error("GenericOpenAI must have a valid model set.");
    this.limits = {
server/utils/EmbeddingEngines/voyageAi/index.js (new file): 45 lines
@@ -0,0 +1,45 @@
class VoyageAiEmbedder {
  constructor() {
    if (!process.env.VOYAGEAI_API_KEY)
      throw new Error("No Voyage AI API key was set.");

    const {
      VoyageEmbeddings,
    } = require("@langchain/community/embeddings/voyage");
    const voyage = new VoyageEmbeddings({
      apiKey: process.env.VOYAGEAI_API_KEY,
    });

    this.voyage = voyage;
    this.model = process.env.EMBEDDING_MODEL_PREF || "voyage-large-2-instruct";

    // Limit of how many strings we can process in a single pass to stay with resource or network limits
    this.batchSize = 128; // Voyage AI's limit per request is 128 https://docs.voyageai.com/docs/rate-limits#use-larger-batches
    this.embeddingMaxChunkLength = 4000; // https://docs.voyageai.com/docs/embeddings - assume a token is roughly 4 letters with some padding
  }

  async embedTextInput(textInput) {
    const result = await this.voyage.embedDocuments(
      Array.isArray(textInput) ? textInput : [textInput],
      { modelName: this.model }
    );
    return result || [];
  }

  async embedChunks(textChunks = []) {
    try {
      const embeddings = await this.voyage.embedDocuments(textChunks, {
        modelName: this.model,
        batchSize: this.batchSize,
      });
      return embeddings;
    } catch (error) {
      console.error("Voyage AI Failed to embed:", error);
      throw error;
    }
  }
}

module.exports = {
  VoyageAiEmbedder,
};
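A short usage sketch of the new embedder, assuming VOYAGEAI_API_KEY is exported, the @langchain/community dependency is installed, and the script runs from the repository root; the sample chunks are placeholders.

// Illustrative only: embed a few text chunks with the new VoyageAiEmbedder.
const { VoyageAiEmbedder } = require("./server/utils/EmbeddingEngines/voyageAi");

async function main() {
  // Throws immediately if VOYAGEAI_API_KEY is not set, mirroring the constructor guard above.
  const embedder = new VoyageAiEmbedder();
  const vectors = await embedder.embedChunks([
    "AnythingLLM supports multiple embedding providers.",
    "Voyage AI accepts up to 128 inputs per batched request.",
  ]);
  console.log(`Received ${vectors.length} vectors, ${vectors[0].length} dimensions each`);
}

main().catch(console.error);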
@@ -1,6 +1,5 @@
const { Document } = require("../../../../models/documents");
const { safeJsonParse } = require("../../../http");
const { validate } = require("uuid");
const { summarizeContent } = require("../utils/summarize");
const Provider = require("../providers/ai-provider");
@@ -2,6 +2,7 @@ const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
const { toValidNumber } = require("../../../http/index.js");

/**
 * The agent provider for the Generic OpenAI provider.
@@ -24,7 +25,9 @@ class GenericOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
    this._client = client;
    this.model = model;
    this.verbose = true;
    this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024;
    this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS
      ? toValidNumber(process.env.GENERIC_OPEN_AI_MAX_TOKENS, 1024)
      : 1024;
  }

  get client() {
@@ -12,16 +12,18 @@ function bootSSL(app, port = 3001) {
    const privateKey = fs.readFileSync(process.env.HTTPS_KEY_PATH);
    const certificate = fs.readFileSync(process.env.HTTPS_CERT_PATH);
    const credentials = { key: privateKey, cert: certificate };
    const server = https.createServer(credentials, app);

    https
      .createServer(credentials, app)
    server
      .listen(port, async () => {
        await setupTelemetry();
        new CommunicationKey(true);
        console.log(`Primary server in HTTPS mode listening on port ${port}`);
      })
      .on("error", catchSigTerms);
    return app;

    require("express-ws")(app, server); // Apply same certificate + server for WSS connections
    return { app, server };
  } catch (e) {
    console.error(
      `\x1b[31m[SSL BOOT FAILED]\x1b[0m ${e.message} - falling back to HTTP boot.`,
@@ -46,7 +48,8 @@ function bootHTTP(app, port = 3001) {
      console.log(`Primary server in HTTP mode listening on port ${port}`);
    })
    .on("error", catchSigTerms);
  return app;

  return { app, server: null };
}

function catchSigTerms() {
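The boot change above matters because express-ws has to attach to whichever server instance actually listens: in SSL mode that is the https server built from the certificates, otherwise the default HTTP server Express creates. A minimal standalone sketch of that pattern, with placeholder cert paths and none of the AnythingLLM-specific wiring:

// Illustrative only: bind WebSocket upgrades to the same server that terminates TLS.
const fs = require("fs");
const https = require("https");
const express = require("express");
const expressWs = require("express-ws");

const app = express();
const port = process.env.SERVER_PORT || 3001;

if (process.env.ENABLE_HTTPS) {
  const credentials = {
    key: fs.readFileSync(process.env.HTTPS_KEY_PATH), // placeholder paths
    cert: fs.readFileSync(process.env.HTTPS_CERT_PATH),
  };
  const server = https.createServer(credentials, app);
  expressWs(app, server); // WSS upgrades ride on the same HTTPS server.
  server.listen(port);
} else {
  expressWs(app); // plain WS; express-ws uses the server created by app.listen.
  app.listen(port);
}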
@@ -29,6 +29,7 @@ async function streamChatWithForEmbed(

  const uuid = uuidv4();
  const LLMConnector = getLLMProvider({
    provider: embed?.workspace?.chatProvider,
    model: chatModel ?? embed.workspace?.chatModel,
  });
  const VectorDb = getVectorDbClass();
@@ -17,6 +17,7 @@ class CollectorApi {
  #attachOptions() {
    return {
      whisperProvider: process.env.WHISPER_PROVIDER || "local",
      WhisperModelPref: process.env.WHISPER_MODEL_PREF,
      openAiKey: process.env.OPEN_AI_KEY || null,
    };
  }
@@ -125,6 +125,9 @@ function getEmbeddingEngineSelection() {
    case "cohere":
      const { CohereEmbedder } = require("../EmbeddingEngines/cohere");
      return new CohereEmbedder();
    case "voyageai":
      const { VoyageAiEmbedder } = require("../EmbeddingEngines/voyageAi");
      return new VoyageAiEmbedder();
    default:
      return new NativeEmbedder();
  }
@@ -350,12 +350,23 @@ const KEY_MAPPING = {
    checks: [isNotEmpty],
  },

  // VoyageAi Options
  VoyageAiApiKey: {
    envKey: "VOYAGEAI_API_KEY",
    checks: [isNotEmpty],
  },

  // Whisper (transcription) providers
  WhisperProvider: {
    envKey: "WHISPER_PROVIDER",
    checks: [isNotEmpty, supportedTranscriptionProvider],
    postUpdate: [],
  },
  WhisperModelPref: {
    envKey: "WHISPER_MODEL_PREF",
    checks: [validLocalWhisper],
    postUpdate: [],
  },

  // System Settings
  AuthToken: {
@@ -468,6 +479,16 @@ function supportedTTSProvider(input = "") {
  return validSelection ? null : `${input} is not a valid TTS provider.`;
}

function validLocalWhisper(input = "") {
  const validSelection = [
    "Xenova/whisper-small",
    "Xenova/whisper-large",
  ].includes(input);
  return validSelection
    ? null
    : `${input} is not a valid Whisper model selection.`;
}

function supportedLLM(input = "") {
  const validSelection = [
    "openai",
@@ -530,6 +551,7 @@ function supportedEmbeddingModel(input = "") {
    "ollama",
    "lmstudio",
    "cohere",
    "voyageai",
  ];
  return supported.includes(input)
    ? null
@@ -91,6 +91,11 @@ function isValidUrl(urlString = "") {
  return false;
}

function toValidNumber(number = null, fallback = null) {
  if (isNaN(Number(number))) return fallback;
  return Number(number);
}

module.exports = {
  reqBody,
  multiUserMode,
@@ -101,4 +106,5 @@ module.exports = {
  parseAuthHeader,
  safeJsonParse,
  isValidUrl,
  toValidNumber,
};
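The new toValidNumber helper is what guards GENERIC_OPEN_AI_MAX_TOKENS in the hunks above: any value that does not coerce to a number falls back instead of leaking NaN or a raw string into request payloads. A quick sketch of the behavior (the helper is copied from the diff; the sample inputs are arbitrary):

// Illustrative only: coerce env-style strings to numbers with a fallback.
function toValidNumber(number = null, fallback = null) {
  if (isNaN(Number(number))) return fallback;
  return Number(number);
}

console.log(toValidNumber("4096", 1024)); // 4096
console.log(toValidNumber("lots", 1024)); // 1024
console.log(toValidNumber(undefined, 1024)); // 1024 (Number(undefined) is NaN)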