bump render to latest (Nov 6, 2024)

timothycarambat 2024-11-06 11:09:24 -08:00
commit 2cdf7877f2
64 changed files with 1738 additions and 102 deletions

View File

@ -33,6 +33,7 @@
"Mintplex",
"mixtral",
"moderations",
"novita",
"numpages",
"Ollama",
"Oobabooga",

View File

@ -96,6 +96,7 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
- [Text Generation Web UI](https://github.com/oobabooga/text-generation-webui)
- [Apipie](https://apipie.ai/)
- [xAI](https://x.ai/)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
**Embedder models:**
@ -150,7 +151,7 @@ This monorepo consists of three main sections:
Mintplex Labs & the community maintain a number of deployment methods, scripts, and templates that you can use to run AnythingLLM locally. Refer to the table below to read how to deploy on your preferred environment or to automatically deploy.
| Docker | AWS | GCP | Digital Ocean | Render.com |
|----------------------------------------|----:|-----|---------------|------------|
|----------------------------------------|----|-----|---------------|------------|
| [![Deploy on Docker][docker-btn]][docker-deploy] | [![Deploy on AWS][aws-btn]][aws-deploy] | [![Deploy on GCP][gcp-btn]][gcp-deploy] | [![Deploy on DigitalOcean][do-btn]][do-deploy] | [![Deploy on Render.com][render-btn]][render-deploy] |
| Railway | RepoCloud | Elestio |

View File

@ -118,8 +118,7 @@ function extensions(app) {
try {
const websiteDepth = require("../utils/extensions/WebsiteDepth");
const { url, depth = 1, maxLinks = 20 } = reqBody(request);
if (!validURL(url)) return { success: false, reason: "Not a valid URL." };
if (!validURL(url)) throw new Error("Not a valid URL.");
const scrapedData = await websiteDepth(url, depth, maxLinks);
response.status(200).json({ success: true, data: scrapedData });
} catch (e) {

View File

@ -38,6 +38,7 @@ class MimeDetector {
"pas",
"r",
"go",
"ino",
],
},
true
@ -47,7 +48,7 @@ class MimeDetector {
// These are file types that are not detected by the mime library and need to be processed as text files.
// You should only add file types that are not detected by the mime library, are parsable as text, and are files
// with no extension. Otherwise, their extension should be added to the overrides array.
#specialTextFileTypes = ["dockerfile", "jenkinsfile"];
#specialTextFileTypes = ["dockerfile", "jenkinsfile", "dockerignore"];
/**
* Returns the MIME type of the file. If the file has no extension found, it will be processed as a text file.

View File

@ -1,7 +1,7 @@
/** ATTN: SECURITY RESEARCHERS
* To security researchers about to submit an SSRF report CVE - please don't.
* We are aware that the code below does not defend against any of the thousands of ways
* you can map a hostname to another IP. The code below does not intend to block this
* you can map a hostname to another IP via tunneling, hosts editing, etc. The code below does not intend to block this
* and simply prevents the user from accidentally putting in invalid websites, which is all this protects,
* since _all urls must be submitted by the user anyway_ and cannot be done without authentication and manager or admin roles.
* If an attacker has those roles then the system is already vulnerable and this is not a primary concern.
@ -14,15 +14,29 @@
const VALID_PROTOCOLS = ["https:", "http:"];
const INVALID_OCTETS = [192, 172, 10, 127];
/**
* If an ip address is passed in, the user is attempting to collect from some internal service running on an internal/private IP.
* This is not a security feature; it simply prevents the user from accidentally entering invalid IP addresses.
* @param {URL} param0
* @param {URL['hostname']} param0.hostname
* @returns {boolean}
*/
function isInvalidIp({ hostname }) {
const IPRegex = new RegExp(
/^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$/gi
);
// Not an IP address at all - passthrough
if (!IPRegex.test(hostname)) return false;
const [octetOne, ..._rest] = hostname.split(".");
// If fails to validate to number - abort and return as invalid.
if (isNaN(Number(octetOne))) return true;
// Allow localhost loopback and 0.0.0.0 for scraping convenience
// for locally hosted services or websites
if (["127.0.0.1", "0.0.0.0"].includes(hostname)) return false;
return INVALID_OCTETS.includes(Number(octetOne));
}
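A quick behavior sketch of this filter (calls illustrative; only the leading octet and the two explicit loopback allowances above are consulted):

// Behavior sketch of isInvalidIp; expected return values in comments.
isInvalidIp({ hostname: "example.com" }); // false - not an IP, passes through
isInvalidIp({ hostname: "127.0.0.1" });   // false - loopback explicitly allowed
isInvalidIp({ hostname: "10.0.0.5" });    // true  - leading octet 10 is private
isInvalidIp({ hostname: "192.168.1.1" }); // true  - leading octet 192 is blocked
isInvalidIp({ hostname: "8.8.8.8" });     // false - public address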

View File

@ -90,6 +90,10 @@ GID='1000'
# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
# LITE_LLM_API_KEY='sk-123abc'
# LLM_PROVIDER='novita'
# NOVITA_LLM_API_KEY='your-novita-api-key-here' # get yours at https://novita.ai/settings#key-management
# NOVITA_LLM_MODEL_PREF='gryphe/mythomax-l2-13b'
# LLM_PROVIDER='cohere'
# COHERE_API_KEY=
# COHERE_MODEL_PREF='command-r'
@ -292,3 +296,7 @@ GID='1000'
# Disable viewing chat history from the UI and frontend APIs.
# See https://docs.anythingllm.com/configuration#disable-view-chat-history for more information.
# DISABLE_VIEW_CHAT_HISTORY=1
# Enable simple SSO passthrough to pre-authenticate users from a third party service.
# See https://docs.anythingllm.com/configuration#simple-sso-passthrough for more information.
# SIMPLE_SSO_ENABLED=1
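For reference, a minimal sketch of the pre-authenticated login link a third-party service would redirect users to once a token is issued (host and token are hypothetical; redirectTo is optional and handled by the new /sso/simple frontend route below):

// Example pre-authenticated login link (host and token hypothetical):
// https://your-anythingllm.example.com/sso/simple?token=allm-tat-1234&redirectTo=/workspace/my-workspace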

View File

@ -9,6 +9,7 @@ import PrivateRoute, {
import { ToastContainer } from "react-toastify";
import "react-toastify/dist/ReactToastify.css";
import Login from "@/pages/Login";
import SimpleSSOPassthrough from "@/pages/Login/SSO/simple";
import OnboardingFlow from "@/pages/OnboardingFlow";
import i18n from "./i18n";
@ -77,6 +78,8 @@ export default function App() {
<Routes>
<Route path="/" element={<PrivateRoute Component={Main} />} />
<Route path="/login" element={<Login />} />
<Route path="/sso/simple" element={<SimpleSSOPassthrough />} />
<Route
path="/workspace/:slug/settings/:tab"
element={<ManagerRoute Component={WorkspaceSettings} />}

View File

@ -34,8 +34,12 @@ export default function AnthropicAiOptions({ settings }) {
"claude-2.0",
"claude-2.1",
"claude-3-haiku-20240307",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-opus-latest",
"claude-3-5-haiku-latest",
"claude-3-5-haiku-20241022",
"claude-3-5-sonnet-latest",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-20240620",
].map((model) => {
return (

View File

@ -1,7 +1,12 @@
import { ArrowSquareOut, Info } from "@phosphor-icons/react";
import { AWS_REGIONS } from "./regions";
import { useState } from "react";
export default function AwsBedrockLLMOptions({ settings }) {
const [useSessionToken, setUseSessionToken] = useState(
settings?.AwsBedrockLLMConnectionMethod === "sessionToken"
);
return (
<div className="w-full flex flex-col">
{!settings?.credentialsOnly && (
@ -24,6 +29,43 @@ export default function AwsBedrockLLMOptions({ settings }) {
</div>
)}
<div className="flex flex-col gap-y-2">
<input
type="hidden"
name="AwsBedrockLLMConnectionMethod"
value={useSessionToken ? "sessionToken" : "iam"}
/>
<div className="flex flex-col w-full">
<label className="text-white text-sm font-semibold block mb-3">
Use session token
</label>
<p className="text-white/50 text-sm">
Select the method to authenticate with AWS Bedrock.
</p>
</div>
<div className="flex items-center justify-start gap-x-4 bg-zinc-900 p-2.5 rounded-lg w-fit">
<span
className={`text-sm ${!useSessionToken ? "text-white" : "text-white/50"}`}
>
IAM
</span>
<label className="relative inline-flex items-center cursor-pointer">
<input
type="checkbox"
className="sr-only peer"
checked={useSessionToken}
onChange={(e) => setUseSessionToken(e.target.checked)}
/>
<div className="w-11 h-6 bg-zinc-700 peer-focus:outline-none rounded-full peer peer-checked:after:translate-x-full after:content-[''] after:absolute after:top-[2px] after:left-[2px] after:bg-white after:rounded-full after:h-5 after:w-5 after:transition-all peer-checked:bg-primary-button"></div>
</label>
<span
className={`text-sm ${useSessionToken ? "text-white" : "text-white/50"}`}
>
Session Token
</span>
</div>
</div>
<div className="w-full flex items-center gap-[36px] my-1.5">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
@ -59,6 +101,25 @@ export default function AwsBedrockLLMOptions({ settings }) {
spellCheck={false}
/>
</div>
{useSessionToken && (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
AWS Bedrock Session Token
</label>
<input
type="password"
name="AwsBedrockLLMSessionToken"
className="border-none bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
placeholder="AWS Bedrock Session Token"
defaultValue={
settings?.AwsBedrockLLMSessionToken ? "*".repeat(20) : ""
}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
)}
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
AWS region

View File

@ -0,0 +1,142 @@
import System from "@/models/system";
import { CaretDown, CaretUp } from "@phosphor-icons/react";
import { useState, useEffect } from "react";
export default function NovitaLLMOptions({ settings }) {
return (
<div className="flex flex-col gap-y-4 mt-1.5">
<div className="flex gap-[36px]">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
Novita API Key
</label>
<input
type="password"
name="NovitaLLMApiKey"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
placeholder="Novita API Key"
defaultValue={settings?.NovitaLLMApiKey ? "*".repeat(20) : ""}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
{!settings?.credentialsOnly && (
<NovitaModelSelection settings={settings} />
)}
</div>
<AdvancedControls settings={settings} />
</div>
);
}
function AdvancedControls({ settings }) {
const [showAdvancedControls, setShowAdvancedControls] = useState(false);
return (
<div className="flex flex-col gap-y-4">
<button
type="button"
onClick={() => setShowAdvancedControls(!showAdvancedControls)}
className="text-white hover:text-white/70 flex items-center text-sm"
>
{showAdvancedControls ? "Hide" : "Show"} advanced controls
{showAdvancedControls ? (
<CaretUp size={14} className="ml-1" />
) : (
<CaretDown size={14} className="ml-1" />
)}
</button>
<div hidden={!showAdvancedControls}>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
Stream Timeout (ms)
</label>
<input
type="number"
name="NovitaLLMTimeout"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
placeholder="Timeout value between token responses to auto-timeout the stream"
defaultValue={settings?.NovitaLLMTimeout ?? 500}
autoComplete="off"
onScroll={(e) => e.target.blur()}
min={500}
step={1}
/>
</div>
</div>
</div>
);
}
function NovitaModelSelection({ settings }) {
const [groupedModels, setGroupedModels] = useState({});
const [loading, setLoading] = useState(true);
useEffect(() => {
async function findCustomModels() {
setLoading(true);
const { models } = await System.customModels("novita");
if (models?.length > 0) {
const modelsByOrganization = models.reduce((acc, model) => {
acc[model.organization] = acc[model.organization] || [];
acc[model.organization].push(model);
return acc;
}, {});
setGroupedModels(modelsByOrganization);
}
setLoading(false);
}
findCustomModels();
}, []);
if (loading || Object.keys(groupedModels).length === 0) {
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
Chat Model Selection
</label>
<select
name="NovitaLLMModelPref"
disabled={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
<option disabled={true} selected={true}>
-- loading available models --
</option>
</select>
</div>
);
}
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
Chat Model Selection
</label>
<select
name="NovitaLLMModelPref"
required={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{Object.keys(groupedModels)
.sort()
.map((organization) => (
<optgroup key={organization} label={organization}>
{groupedModels[organization].map((model) => (
<option
key={model.id}
value={model.id}
selected={settings?.NovitaLLMModelPref === model.id}
>
{model.name}
</option>
))}
</optgroup>
))}
</select>
</div>
);
}
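For reference, the reduce in NovitaModelSelection groups the fetched models by organization into a shape like this (entries hypothetical):

// Illustrative shape of groupedModels after the reduce:
const exampleGroupedModels = {
  Gryphe: [
    { id: "gryphe/mythomax-l2-13b", name: "MythoMax L2 13B", organization: "Gryphe" },
  ],
  Meta: [
    { id: "meta-llama/llama-3.1-8b-instruct", name: "Llama 3.1 8B Instruct", organization: "Meta" },
  ],
};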

View File

@ -25,9 +25,13 @@ const PROVIDER_DEFAULT_MODELS = {
"claude-instant-1.2",
"claude-2.0",
"claude-2.1",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-haiku-20240307",
"claude-3-sonnet-20240229",
"claude-3-opus-latest",
"claude-3-5-haiku-latest",
"claude-3-5-haiku-20241022",
"claude-3-5-sonnet-latest",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-20240620",
],
azure: [],
@ -63,7 +67,13 @@ function groupModels(models) {
}, {});
}
const groupedProviders = ["togetherai", "fireworksai", "openai", "openrouter"];
const groupedProviders = [
"togetherai",
"fireworksai",
"openai",
"novita",
"openrouter",
];
export default function useGetProviderModels(provider = null) {
const [defaultModels, setDefaultModels] = useState([]);
const [customModels, setCustomModels] = useState([]);

Binary file not shown (new image, 38 KiB).

View File

@ -706,6 +706,30 @@ const System = {
);
return { viewable: isViewable, error: null };
},
/**
* Validates a temporary auth token and logs in the user if the token is valid.
* @param {string} publicToken - the token to validate against
* @returns {Promise<{valid: boolean, user: import("@prisma/client").users | null, token: string | null, message: string | null}>}
*/
simpleSSOLogin: async function (publicToken) {
return fetch(`${API_BASE}/request-token/sso/simple?token=${publicToken}`, {
method: "GET",
})
.then(async (res) => {
if (!res.ok) {
const text = await res.text();
if (!text.startsWith("{")) throw new Error(text);
return JSON.parse(text);
}
return await res.json();
})
.catch((e) => {
console.error(e);
return { valid: false, user: null, token: null, message: e.message };
});
},
experimentalFeatures: {
liveSync: LiveDocumentSync,
agentPlugins: AgentPlugins,

View File

@ -316,3 +316,13 @@ export function TavilySearchOptions({ settings }) {
</>
);
}
export function DuckDuckGoOptions() {
return (
<>
<p className="text-sm text-white/60 my-2">
DuckDuckGo is ready to use without any additional configuration.
</p>
</>
);
}

Binary file not shown (new image, 218 KiB).

View File

@ -8,6 +8,7 @@ import BingSearchIcon from "./icons/bing.png";
import SerplySearchIcon from "./icons/serply.png";
import SearXNGSearchIcon from "./icons/searxng.png";
import TavilySearchIcon from "./icons/tavily.svg";
import DuckDuckGoIcon from "./icons/duckduckgo.png";
import {
CaretUpDown,
MagnifyingGlass,
@ -24,6 +25,7 @@ import {
SerplySearchOptions,
SearXNGOptions,
TavilySearchOptions,
DuckDuckGoOptions,
} from "./SearchProviderOptions";
const SEARCH_PROVIDERS = [
@ -35,6 +37,14 @@ const SEARCH_PROVIDERS = [
description:
"Web search will be disabled until a provider and keys are provided.",
},
{
name: "DuckDuckGo",
value: "duckduckgo-engine",
logo: DuckDuckGoIcon,
options: () => <DuckDuckGoOptions />,
description:
"Free and privacy-focused web search using DuckDuckGo's HTML interface.",
},
{
name: "Google Search Engine",
value: "google-search-engine",

View File

@ -10,6 +10,7 @@ import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import AnthropicLogo from "@/media/llmprovider/anthropic.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import NovitaLogo from "@/media/llmprovider/novita.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
@ -37,6 +38,7 @@ import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions";
import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
import NovitaLLMOptions from "@/components/LLMSelection/NovitaLLMOptions";
import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
import FireworksAiOptions from "@/components/LLMSelection/FireworksAiOptions";
import MistralOptions from "@/components/LLMSelection/MistralOptions";
@ -111,6 +113,15 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "Run LLMs locally on your own machine.",
requiredConfig: ["OllamaLLMBasePath"],
},
{
name: "Novita AI",
value: "novita",
logo: NovitaLogo,
options: (settings) => <NovitaLLMOptions settings={settings} />,
description:
"Reliable, Scalable, and Cost-Effective for LLMs from Novita AI",
requiredConfig: ["NovitaLLMApiKey"],
},
{
name: "LM Studio",
value: "lmstudio",

View File

@ -0,0 +1,54 @@
import React, { useEffect, useState } from "react";
import { FullScreenLoader } from "@/components/Preloader";
import { Navigate } from "react-router-dom";
import paths from "@/utils/paths";
import useQuery from "@/hooks/useQuery";
import System from "@/models/system";
import { AUTH_TIMESTAMP, AUTH_TOKEN, AUTH_USER } from "@/utils/constants";
export default function SimpleSSOPassthrough() {
const query = useQuery();
const redirectPath = query.get("redirectTo") || paths.home();
const [ready, setReady] = useState(false);
const [error, setError] = useState(null);
useEffect(() => {
try {
if (!query.get("token")) throw new Error("No token provided.");
// Clear any existing auth data
window.localStorage.removeItem(AUTH_USER);
window.localStorage.removeItem(AUTH_TOKEN);
window.localStorage.removeItem(AUTH_TIMESTAMP);
System.simpleSSOLogin(query.get("token"))
.then((res) => {
if (!res.valid) throw new Error(res.message);
window.localStorage.setItem(AUTH_USER, JSON.stringify(res.user));
window.localStorage.setItem(AUTH_TOKEN, res.token);
window.localStorage.setItem(AUTH_TIMESTAMP, Number(new Date()));
setReady(res.valid);
})
.catch((e) => {
setError(e.message);
});
} catch (e) {
setError(e.message);
}
}, []);
if (error)
return (
<div className="w-screen h-screen overflow-hidden bg-sidebar flex items-center justify-center flex-col gap-4">
<p className="text-white font-mono text-lg">{error}</p>
<p className="text-white/80 font-mono text-sm">
Please contact the system administrator about this error.
</p>
</div>
);
if (ready) return <Navigate to={redirectPath} />;
// Loading state by default
return <FullScreenLoader />;
}

View File

@ -15,6 +15,7 @@ import MistralLogo from "@/media/llmprovider/mistral.jpeg";
import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
import PerplexityLogo from "@/media/llmprovider/perplexity.png";
import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
import NovitaLogo from "@/media/llmprovider/novita.png";
import GroqLogo from "@/media/llmprovider/groq.png";
import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
@ -149,6 +150,14 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: OpenRouterLogo,
},
novita: {
name: "Novita AI",
description: [
"Your chats will not be used for training",
"Your prompts and document text used in response creation are visible to Novita AI",
],
logo: NovitaLogo,
},
groq: {
name: "Groq",
description: [

View File

@ -21,6 +21,7 @@ import LiteLLMLogo from "@/media/llmprovider/litellm.png";
import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
import APIPieLogo from "@/media/llmprovider/apipie.png";
import NovitaLogo from "@/media/llmprovider/novita.png";
import XAILogo from "@/media/llmprovider/xai.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
@ -46,6 +47,7 @@ import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
import NovitaLLMOptions from "@/components/LLMSelection/NovitaLLMOptions";
import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
@ -102,6 +104,14 @@ const LLMS = [
options: (settings) => <OllamaLLMOptions settings={settings} />,
description: "Run LLMs locally on your own machine.",
},
{
name: "Novita AI",
value: "novita",
logo: NovitaLogo,
options: (settings) => <NovitaLLMOptions settings={settings} />,
description:
"Reliable, Scalable, and Cost-Effective for LLMs from Novita AI",
},
{
name: "LM Studio",
value: "lmstudio",

View File

@ -17,6 +17,7 @@ const ENABLED_PROVIDERS = [
"koboldcpp",
"togetherai",
"openrouter",
"novita",
"mistral",
"perplexity",
"textgenwebui",
@ -40,6 +41,7 @@ const WARN_PERFORMANCE = [
"ollama",
"localai",
"openrouter",
"novita",
"generic-openai",
"textgenwebui",
];

View File

@ -85,6 +85,7 @@ AnythingLLMのいくつかのクールな機能
- [Fireworks AI (チャットモデル)](https://fireworks.ai/)
- [Perplexity (チャットモデル)](https://www.perplexity.ai/)
- [OpenRouter (チャットモデル)](https://openrouter.ai/)
- [Novita AI (チャットモデル)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [Mistral](https://mistral.ai/)
- [Groq](https://groq.com/)
- [Cohere](https://cohere.com/)

View File

@ -81,6 +81,7 @@ AnythingLLM的一些酷炫特性
- [Fireworks AI (聊天模型)](https://fireworks.ai/)
- [Perplexity (聊天模型)](https://www.perplexity.ai/)
- [OpenRouter (聊天模型)](https://openrouter.ai/)
- [Novita AI (聊天模型)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [Mistral](https://mistral.ai/)
- [Groq](https://groq.com/)
- [Cohere](https://cohere.com/)

View File

@ -91,6 +91,10 @@ SIG_SALT='salt' # Please generate a random string at least 32 chars long.
# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
# LITE_LLM_API_KEY='sk-123abc'
# LLM_PROVIDER='novita'
# NOVITA_LLM_API_KEY='your-novita-api-key-here' # get yours at https://novita.ai/settings#key-management
# NOVITA_LLM_MODEL_PREF='gryphe/mythomax-l2-13b'
# LLM_PROVIDER='cohere'
# COHERE_API_KEY=
# COHERE_MODEL_PREF='command-r'
@ -281,3 +285,7 @@ TTS_PROVIDER="native"
# Disable viewing chat history from the UI and frontend APIs.
# See https://docs.anythingllm.com/configuration#disable-view-chat-history for more information.
# DISABLE_VIEW_CHAT_HISTORY=1
# Enable simple SSO passthrough to pre-authenticate users from a third party service.
# See https://docs.anythingllm.com/configuration#simple-sso-passthrough for more information.
# SIMPLE_SSO_ENABLED=1

View File

@ -1,6 +1,6 @@
const { Telemetry } = require("../../../models/telemetry");
const { validApiKey } = require("../../../utils/middleware/validApiKey");
const { handleFileUpload } = require("../../../utils/files/multer");
const { handleAPIFileUpload } = require("../../../utils/files/multer");
const {
viewLocalFiles,
findDocumentInDocuments,
@ -23,7 +23,7 @@ function apiDocumentEndpoints(app) {
app.post(
"/v1/document/upload",
[validApiKey, handleFileUpload],
[validApiKey, handleAPIFileUpload],
async (request, response) => {
/*
#swagger.tags = ['Documents']

View File

@ -1,5 +1,9 @@
const { User } = require("../../../models/user");
const { TemporaryAuthToken } = require("../../../models/temporaryAuthToken");
const { multiUserMode } = require("../../../utils/http");
const {
simpleSSOEnabled,
} = require("../../../utils/middleware/simpleSSOEnabled");
const { validApiKey } = require("../../../utils/middleware/validApiKey");
function apiUserManagementEndpoints(app) {
@ -59,6 +63,62 @@ function apiUserManagementEndpoints(app) {
response.sendStatus(500).end();
}
});
app.get(
"/v1/users/:id/issue-auth-token",
[validApiKey, simpleSSOEnabled],
async (request, response) => {
/*
#swagger.tags = ['User Management']
#swagger.description = 'Issue a temporary auth token for a user'
#swagger.parameters['id'] = {
in: 'path',
description: 'The ID of the user to issue a temporary auth token for',
required: true,
type: 'string'
}
#swagger.responses[200] = {
content: {
"application/json": {
schema: {
type: 'object',
example: {
token: "1234567890",
loginPath: "/sso/simple?token=1234567890"
}
}
}
}
}
#swagger.responses[403] = {
schema: {
"$ref": "#/definitions/InvalidAPIKey"
}
}
#swagger.responses[401] = {
description: "Instance is not in Multi-User mode. Permission denied.",
}
*/
try {
const { id: userId } = request.params;
const user = await User.get({ id: Number(userId) });
if (!user)
return response.status(404).json({ error: "User not found" });
const { token, error } = await TemporaryAuthToken.issue(userId);
if (error) return response.status(500).json({ error: error });
response.status(200).json({
token: String(token),
loginPath: `/sso/simple?token=${token}`,
});
} catch (e) {
console.error(e.message, e);
response.sendStatus(500).end();
}
}
);
}
module.exports = { apiUserManagementEndpoints };
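A minimal usage sketch of this endpoint (base URL and API key are assumptions for a default deployment):

// Hypothetical: issue a temporary auth token for user id 1 via the developer API.
async function issueSSOToken() {
  const res = await fetch(
    "http://localhost:3001/api/v1/users/1/issue-auth-token",
    { headers: { Authorization: "Bearer YOUR_API_KEY" } }
  );
  const { token, loginPath } = await res.json();
  return loginPath; // e.g. "/sso/simple?token=allm-tat-..."
}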

View File

@ -150,7 +150,8 @@ function apiWorkspaceEndpoints(app) {
schema: {
type: 'object',
example: {
workspace: {
workspace: [
{
"id": 79,
"name": "My workspace",
"slug": "my-workspace-123",
@ -162,6 +163,7 @@ function apiWorkspaceEndpoints(app) {
"documents": [],
"threads": []
}
]
}
}
}
@ -339,6 +341,24 @@ function apiWorkspaceEndpoints(app) {
required: true,
type: 'string'
}
#swagger.parameters['apiSessionId'] = {
in: 'query',
description: 'Optional apiSessionId to filter by',
required: false,
type: 'string'
}
#swagger.parameters['limit'] = {
in: 'query',
description: 'Optional number of chat messages to return (default: 100)',
required: false,
type: 'integer'
}
#swagger.parameters['orderBy'] = {
in: 'query',
description: 'Optional order of chat messages (asc or desc)',
required: false,
type: 'string'
}
#swagger.responses[200] = {
content: {
"application/json": {
@ -370,6 +390,11 @@ function apiWorkspaceEndpoints(app) {
*/
try {
const { slug } = request.params;
const {
apiSessionId = null,
limit = 100,
orderBy = "desc",
} = request.query;
const workspace = await Workspace.get({ slug });
if (!workspace) {
@ -377,7 +402,21 @@ function apiWorkspaceEndpoints(app) {
return;
}
const history = await WorkspaceChats.forWorkspace(workspace.id);
const validLimit = Math.max(1, parseInt(limit));
const validOrderBy = ["asc", "desc"].includes(orderBy)
? orderBy
: "desc";
const history = apiSessionId
? await WorkspaceChats.forWorkspaceByApiSessionId(
workspace.id,
apiSessionId,
validLimit,
{ createdAt: validOrderBy }
)
: await WorkspaceChats.forWorkspace(workspace.id, validLimit, {
createdAt: validOrderBy,
});
response.status(200).json({ history: convertToChatHistory(history) });
} catch (e) {
console.error(e.message, e);
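A minimal sketch of calling the updated chats endpoint with the new filters (base URL, workspace slug, and API key are assumptions):

// Hypothetical: fetch the 50 oldest chats recorded under one API session.
async function fetchSessionHistory() {
  const url = new URL(
    "http://localhost:3001/api/v1/workspace/my-workspace-123/chats"
  );
  url.searchParams.set("apiSessionId", "session-abc");
  url.searchParams.set("limit", "50");
  url.searchParams.set("orderBy", "asc");
  const res = await fetch(url, {
    headers: { Authorization: "Bearer YOUR_API_KEY" },
  });
  const { history } = await res.json();
  return history;
}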

View File

@ -58,6 +58,8 @@ const { BrowserExtensionApiKey } = require("../models/browserExtensionApiKey");
const {
chatHistoryViewable,
} = require("../utils/middleware/chatHistoryViewable");
const { simpleSSOEnabled } = require("../utils/middleware/simpleSSOEnabled");
const { TemporaryAuthToken } = require("../models/temporaryAuthToken");
function systemEndpoints(app) {
if (!app) return;
@ -256,6 +258,49 @@ function systemEndpoints(app) {
}
});
app.get(
"/request-token/sso/simple",
[simpleSSOEnabled],
async (request, response) => {
const { token: tempAuthToken } = request.query;
const { sessionToken, token, error } =
await TemporaryAuthToken.validate(tempAuthToken);
if (error) {
await EventLogs.logEvent("failed_login_invalid_temporary_auth_token", {
ip: request.ip || "Unknown IP",
multiUserMode: true,
});
return response.status(401).json({
valid: false,
token: null,
message: `[001] An error occurred while validating the token: ${error}`,
});
}
await Telemetry.sendTelemetry(
"login_event",
{ multiUserMode: true },
token.user.id
);
await EventLogs.logEvent(
"login_event",
{
ip: request.ip || "Unknown IP",
username: token.user.username || "Unknown user",
},
token.user.id
);
response.status(200).json({
valid: true,
user: User.filterFields(token.user),
token: sessionToken,
message: null,
});
}
);
app.post(
"/system/recover-account",
[isMultiUserSetup],

View File

@ -105,6 +105,7 @@ const SystemSettings = {
"serply-engine",
"searxng-engine",
"tavily-search",
"duckduckgo-engine",
].includes(update)
)
throw new Error("Invalid SERP provider.");
@ -453,6 +454,11 @@ const SystemSettings = {
OllamaLLMKeepAliveSeconds: process.env.OLLAMA_KEEP_ALIVE_TIMEOUT ?? 300,
OllamaLLMPerformanceMode: process.env.OLLAMA_PERFORMANCE_MODE ?? "base",
// Novita LLM Keys
NovitaLLMApiKey: !!process.env.NOVITA_LLM_API_KEY,
NovitaLLMModelPref: process.env.NOVITA_LLM_MODEL_PREF,
NovitaLLMTimeout: process.env.NOVITA_LLM_TIMEOUT_MS,
// TogetherAI Keys
TogetherAiApiKey: !!process.env.TOGETHER_AI_API_KEY,
TogetherAiModelPref: process.env.TOGETHER_AI_MODEL_PREF,
@ -510,8 +516,11 @@ const SystemSettings = {
GenericOpenAiKey: !!process.env.GENERIC_OPEN_AI_API_KEY,
GenericOpenAiMaxTokens: process.env.GENERIC_OPEN_AI_MAX_TOKENS,
AwsBedrockLLMConnectionMethod:
process.env.AWS_BEDROCK_LLM_CONNECTION_METHOD || "iam",
AwsBedrockLLMAccessKeyId: !!process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID,
AwsBedrockLLMAccessKey: !!process.env.AWS_BEDROCK_LLM_ACCESS_KEY,
AwsBedrockLLMSessionToken: !!process.env.AWS_BEDROCK_LLM_SESSION_TOKEN,
AwsBedrockLLMRegion: process.env.AWS_BEDROCK_LLM_REGION,
AwsBedrockLLMModel: process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE,
AwsBedrockLLMTokenLimit: process.env.AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT,

View File

@ -0,0 +1,104 @@
const { makeJWT } = require("../utils/http");
const prisma = require("../utils/prisma");
/**
* Temporary auth tokens are used for simple SSO.
* They allow a time-limited token to be passed in the query string of the /sso/simple login URL
* to log in as a user without a username and password. These tokens are single-use and expire.
*/
const TemporaryAuthToken = {
expiry: 1000 * 60 * 6, // 6 minutes
tablename: "temporary_auth_tokens",
writable: [],
makeTempToken: () => {
const uuidAPIKey = require("uuid-apikey");
return `allm-tat-${uuidAPIKey.create().apiKey}`;
},
/**
* Issues a temporary auth token for a user via its ID.
* @param {number} userId
* @returns {Promise<{token: string|null, error: string | null}>}
*/
issue: async function (userId = null) {
if (!userId)
throw new Error("User ID is required to issue a temporary auth token.");
await this.invalidateUserTokens(userId);
try {
const token = this.makeTempToken();
const expiresAt = new Date(Date.now() + this.expiry);
await prisma.temporary_auth_tokens.create({
data: {
token,
expiresAt,
userId: Number(userId),
},
});
return { token, error: null };
} catch (error) {
console.error("FAILED TO CREATE TEMPORARY AUTH TOKEN.", error.message);
return { token: null, error: error.message };
}
},
/**
* Invalidates (deletes) all temporary auth tokens for a user via their ID.
* @param {number} userId
* @returns {Promise<boolean>}
*/
invalidateUserTokens: async function (userId) {
if (!userId)
throw new Error(
"User ID is required to invalidate temporary auth tokens."
);
await prisma.temporary_auth_tokens.deleteMany({
where: { userId: Number(userId) },
});
return true;
},
/**
* Validates a temporary auth token and returns the session token
* to be set in the browser localStorage for authentication.
* @param {string} publicToken - the token to validate against
* @returns {Promise<{sessionToken: string|null, token: import("@prisma/client").temporary_auth_tokens & {user: import("@prisma/client").users} | null, error: string | null}>}
*/
validate: async function (publicToken = "") {
/** @type {import("@prisma/client").temporary_auth_tokens & {user: import("@prisma/client").users} | undefined | null} **/
let token;
try {
if (!publicToken)
throw new Error(
"Public token is required to validate a temporary auth token."
);
token = await prisma.temporary_auth_tokens.findUnique({
where: { token: String(publicToken) },
include: { user: true },
});
if (!token) throw new Error("Invalid token.");
if (token.expiresAt < new Date()) throw new Error("Token expired.");
if (token.user.suspended) throw new Error("User account suspended.");
// Create a new session token for the user valid for 30 days
const sessionToken = makeJWT(
{ id: token.user.id, username: token.user.username },
"30d"
);
return { sessionToken, token, error: null };
} catch (error) {
console.error("FAILED TO VALIDATE TEMPORARY AUTH TOKEN.", error.message);
return { sessionToken: null, token: null, error: error.message };
} finally {
// Always delete the token once it has been retrieved, regardless of validation outcome.
if (token)
await prisma.temporary_auth_tokens.delete({ where: { id: token.id } });
}
},
};
module.exports = { TemporaryAuthToken };
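A minimal sketch of the issue/validate lifecycle this model implements (user id hypothetical):

// Server-side walkthrough of the temporary token lifecycle.
async function demoTokenLifecycle() {
  const { token } = await TemporaryAuthToken.issue(1); // also invalidates prior tokens for user 1
  // The user would now visit /sso/simple?token=<token> within the expiry window.
  const { sessionToken, error } = await TemporaryAuthToken.validate(token);
  console.log(sessionToken, error); // 30-day JWT, null
  // validate() deletes the row in its finally block, so replaying the token fails.
  const replay = await TemporaryAuthToken.validate(token); // { error: "Invalid token." }
  return replay;
}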

View File

@ -55,6 +55,31 @@ const WorkspaceChats = {
}
},
forWorkspaceByApiSessionId: async function (
workspaceId = null,
apiSessionId = null,
limit = null,
orderBy = null
) {
if (!workspaceId || !apiSessionId) return [];
try {
const chats = await prisma.workspace_chats.findMany({
where: {
workspaceId,
user_id: null,
api_session_id: String(apiSessionId),
thread_id: null,
},
...(limit !== null ? { take: limit } : {}),
...(orderBy !== null ? { orderBy } : { orderBy: { id: "asc" } }),
});
return chats;
} catch (error) {
console.error(error.message);
return [];
}
},
forWorkspace: async function (
workspaceId = null,
limit = null,

View File

@ -19,7 +19,7 @@
"seed": "node prisma/seed.js"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.16.1",
"@anthropic-ai/sdk": "^0.32.1",
"@azure/openai": "1.0.0-beta.10",
"@datastax/astra-db-ts": "^0.1.3",
"@google/generative-ai": "^0.7.1",

View File

@ -0,0 +1,12 @@
-- CreateTable
CREATE TABLE "temporary_auth_tokens" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"token" TEXT NOT NULL,
"userId" INTEGER NOT NULL,
"expiresAt" DATETIME NOT NULL,
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "temporary_auth_tokens_userId_fkey" FOREIGN KEY ("userId") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE
);
-- CreateIndex
CREATE UNIQUE INDEX "temporary_auth_tokens_token_key" ON "temporary_auth_tokens"("token");

View File

@ -0,0 +1,5 @@
-- CreateIndex
CREATE INDEX "temporary_auth_tokens_token_idx" ON "temporary_auth_tokens"("token");
-- CreateIndex
CREATE INDEX "temporary_auth_tokens_userId_idx" ON "temporary_auth_tokens"("userId");

View File

@ -78,6 +78,7 @@ model users {
workspace_agent_invocations workspace_agent_invocations[]
slash_command_presets slash_command_presets[]
browser_extension_api_keys browser_extension_api_keys[]
temporary_auth_tokens temporary_auth_tokens[]
}
model recovery_codes {
@ -311,3 +312,15 @@ model browser_extension_api_keys {
@@index([user_id])
}
model temporary_auth_tokens {
id Int @id @default(autoincrement())
token String @unique
userId Int
expiresAt DateTime
createdAt DateTime @default(now())
user users @relation(fields: [userId], references: [id], onDelete: Cascade)
@@index([token])
@@index([userId])
}

View File

@ -3,3 +3,4 @@ downloaded/*
!downloaded/.placeholder
openrouter
apipie
novita

View File

@ -1473,7 +1473,8 @@
"schema": {
"type": "object",
"example": {
"workspace": {
"workspace": [
{
"id": 79,
"name": "My workspace",
"slug": "my-workspace-123",
@ -1485,6 +1486,7 @@
"documents": [],
"threads": []
}
]
}
}
}
@ -1649,6 +1651,33 @@
"type": "string"
},
"description": "Unique slug of workspace to find"
},
{
"name": "apiSessionId",
"in": "query",
"description": "Optional apiSessionId to filter by",
"required": false,
"schema": {
"type": "string"
}
},
{
"name": "limit",
"in": "query",
"description": "Optional number of chat messages to return (default: 100)",
"required": false,
"schema": {
"type": "integer"
}
},
{
"name": "orderBy",
"in": "query",
"description": "Optional order of chat messages (asc or desc)",
"required": false,
"schema": {
"type": "string"
}
}
],
"responses": {
@ -2868,6 +2897,65 @@
}
}
},
"/v1/users/{id}/issue-auth-token": {
"get": {
"tags": [
"User Management"
],
"description": "Issue a temporary auth token for a user",
"parameters": [
{
"name": "id",
"in": "path",
"required": true,
"schema": {
"type": "string"
},
"description": "The ID of the user to issue a temporary auth token for"
}
],
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"type": "object",
"example": {
"token": "1234567890",
"loginPath": "/sso/simple?token=1234567890"
}
}
}
}
},
"401": {
"description": "Instance is not in Multi-User mode. Permission denied."
},
"403": {
"description": "Forbidden",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/InvalidAPIKey"
}
},
"application/xml": {
"schema": {
"$ref": "#/components/schemas/InvalidAPIKey"
}
}
}
},
"404": {
"description": "Not Found"
},
"500": {
"description": "Internal Server Error"
}
}
}
},
"/v1/openai/models": {
"get": {
"tags": [

View File

@ -46,9 +46,13 @@ class AnthropicLLM {
"claude-instant-1.2",
"claude-2.0",
"claude-2.1",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-haiku-20240307",
"claude-3-sonnet-20240229",
"claude-3-opus-latest",
"claude-3-5-haiku-latest",
"claude-3-5-haiku-20241022",
"claude-3-5-sonnet-latest",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-20240620",
];
return validModels.includes(modelName);

View File

@ -31,6 +31,14 @@ class AWSBedrockLLM {
if (!process.env.AWS_BEDROCK_LLM_REGION)
throw new Error("No AWS Bedrock LLM region was set.");
if (
process.env.AWS_BEDROCK_LLM_CONNECTION_METHOD === "sessionToken" &&
!process.env.AWS_BEDROCK_LLM_SESSION_TOKEN
)
throw new Error(
"No AWS Bedrock LLM session token was set while using session token as the authentication method."
);
this.model =
modelPreference || process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE;
this.limits = {
@ -41,6 +49,20 @@ class AWSBedrockLLM {
this.embedder = embedder ?? new NativeEmbedder();
this.defaultTemp = 0.7;
this.#log(
`Loaded with model: ${this.model}. Will communicate with AWS Bedrock using ${this.authMethod} authentication.`
);
}
/**
* Get the authentication method for the AWS Bedrock LLM.
* There are only two valid values for this setting - anything else will default to "iam".
* @returns {"iam"|"sessionToken"}
*/
get authMethod() {
const method = process.env.AWS_BEDROCK_LLM_CONNECTION_METHOD || "iam";
if (!["iam", "sessionToken"].includes(method)) return "iam";
return method;
}
#bedrockClient({ temperature = 0.7 }) {
@ -51,6 +73,9 @@ class AWSBedrockLLM {
credentials: {
accessKeyId: process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID,
secretAccessKey: process.env.AWS_BEDROCK_LLM_ACCESS_KEY,
...(this.authMethod === "sessionToken"
? { sessionToken: process.env.AWS_BEDROCK_LLM_SESSION_TOKEN }
: {}),
},
temperature,
});

View File

@ -7,9 +7,16 @@ const MODEL_MAP = {
"claude-instant-1.2": 100_000,
"claude-2.0": 100_000,
"claude-2.1": 200_000,
"claude-3-opus-20240229": 200_000,
"claude-3-sonnet-20240229": 200_000,
"claude-3-haiku-20240307": 200_000,
"claude-3-sonnet-20240229": 200_000,
"claude-3-opus-20240229": 200_000,
"claude-3-haiku-20240307": 200_000,
"claude-3-sonnet-20240229": 200_000,
"claude-3-opus-latest": 200_000,
"claude-3-5-haiku-latest": 200_000,
"claude-3-5-haiku-20241022": 200_000,
"claude-3-5-sonnet-latest": 200_000,
"claude-3-5-sonnet-20241022": 200_000,
"claude-3-5-sonnet-20240620": 200_000,
},
cohere: {

View File

@ -0,0 +1,376 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { v4: uuidv4 } = require("uuid");
const {
writeResponseChunk,
clientAbortedHandler,
} = require("../../helpers/chat/responses");
const fs = require("fs");
const path = require("path");
const { safeJsonParse } = require("../../http");
const cacheFolder = path.resolve(
process.env.STORAGE_DIR
? path.resolve(process.env.STORAGE_DIR, "models", "novita")
: path.resolve(__dirname, `../../../storage/models/novita`)
);
class NovitaLLM {
constructor(embedder = null, modelPreference = null) {
if (!process.env.NOVITA_LLM_API_KEY)
throw new Error("No Novita API key was set.");
const { OpenAI: OpenAIApi } = require("openai");
this.basePath = "https://api.novita.ai/v3/openai";
this.openai = new OpenAIApi({
baseURL: this.basePath,
apiKey: process.env.NOVITA_LLM_API_KEY ?? null,
defaultHeaders: {
"HTTP-Referer": "https://anythingllm.com",
"X-Novita-Source": "anythingllm",
},
});
this.model =
modelPreference ||
process.env.NOVITA_LLM_MODEL_PREF ||
"gryphe/mythomax-l2-13b";
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
this.embedder = embedder ?? new NativeEmbedder();
this.defaultTemp = 0.7;
this.timeout = this.#parseTimeout();
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
this.cacheModelPath = path.resolve(cacheFolder, "models.json");
this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
this.log(`Loaded with model: ${this.model}`);
}
log(text, ...args) {
console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
}
/**
* Novita has various models that never return a `finish_reason` and thus leave the stream open,
* which causes issues in subsequent messages. This timeout forces us to close the stream after
* x milliseconds without a new chunk. It is configurable via the NOVITA_LLM_TIMEOUT_MS env variable.
* @returns {number} The timeout value in milliseconds (default: 500)
*/
#parseTimeout() {
if (isNaN(Number(process.env.NOVITA_LLM_TIMEOUT_MS))) return 500;
const setValue = Number(process.env.NOVITA_LLM_TIMEOUT_MS);
if (setValue < 500) return 500;
return setValue;
}
// Checks if the .cached_at file has a timestamp more than one week (in ms) old.
// If it does, we refetch from the API so that all the models are up to date.
#cacheIsStale() {
const MAX_STALE = 6.048e8; // 1 Week in MS
if (!fs.existsSync(this.cacheAtPath)) return true;
const now = Number(new Date());
const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
return now - timestampMs > MAX_STALE;
}
// The Novita model API has a lot of models, so we cache the list locally in this directory;
// if the cached JSON file is stale or does not exist, we fetch from the API and store it.
// This might slow down the first request, but we need the proper token context window
// for each model, and since this is a constructor property we can really only get it from this cache.
// We used to maintain this list as a chore, but given there is an API to get the info, that makes little sense.
async #syncModels() {
if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
return false;
this.log("Model cache is not present or stale. Fetching from Novita API.");
await fetchNovitaModels();
return;
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
models() {
if (!fs.existsSync(this.cacheModelPath)) return {};
return safeJsonParse(
fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
{}
);
}
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
static promptWindowLimit(modelName) {
const cacheModelPath = path.resolve(cacheFolder, "models.json");
const availableModels = fs.existsSync(cacheModelPath)
? safeJsonParse(
fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
{}
)
: {};
return availableModels[modelName]?.maxLength || 4096;
}
promptWindowLimit() {
const availableModels = this.models();
return availableModels[this.model]?.maxLength || 4096;
}
async isValidChatCompletionModel(model = "") {
await this.#syncModels();
const availableModels = this.models();
return availableModels.hasOwnProperty(model);
}
/**
* Generates appropriate content array for a message + attachments.
* @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
* @returns {string|object[]}
*/
#generateContent({ userPrompt, attachments = [] }) {
if (!attachments.length) {
return userPrompt;
}
const content = [{ type: "text", text: userPrompt }];
for (let attachment of attachments) {
content.push({
type: "image_url",
image_url: {
url: attachment.contentString,
detail: "auto",
},
});
}
return content.flat();
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [],
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...chatHistory,
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`Novita chat: ${this.model} is not valid for chat completion!`
);
const result = await this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
})
.catch((e) => {
throw new Error(e.message);
});
if (!result.hasOwnProperty("choices") || result.choices.length === 0)
return null;
return result.choices[0].message.content;
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`Novita chat: ${this.model} is not valid for chat completion!`
);
const streamRequest = await this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
});
return streamRequest;
}
handleStream(response, stream, responseProps) {
const timeoutThresholdMs = this.timeout;
const { uuid = uuidv4(), sources = [] } = responseProps;
return new Promise(async (resolve) => {
let fullText = "";
let lastChunkTime = null; // remains null until the first token is received.
// Establish listener to early-abort a streaming response
// in case things go sideways or the user does not like the response.
// We preserve the generated text but continue as if chat was completed
// to preserve previously generated content.
const handleAbort = () => clientAbortedHandler(resolve, fullText);
response.on("close", handleAbort);
// NOTICE: Not all Novita models will return a stop reason,
// which keeps the connection open and the model never finalizes the stream
// like the traditional OpenAI response schema does. So if the response stream
// never reaches a formal close state, we maintain an interval timer: once >= timeoutThresholdMs pass
// with no new chunks, we kill the stream and assume it to be complete. Novita is quite fast,
// so this threshold should permit most responses, but we can adjust `timeoutThresholdMs` if
// we find it is too aggressive.
const timeoutCheck = setInterval(() => {
if (lastChunkTime === null) return;
const now = Number(new Date());
const diffMs = now - lastChunkTime;
if (diffMs >= timeoutThresholdMs) {
this.log(
`Novita stream did not self-close and has been stale for >${timeoutThresholdMs}ms. Closing response stream.`
);
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
clearInterval(timeoutCheck);
response.removeListener("close", handleAbort);
resolve(fullText);
}
}, 500);
try {
for await (const chunk of stream) {
const message = chunk?.choices?.[0];
const token = message?.delta?.content;
lastChunkTime = Number(new Date());
if (token) {
fullText += token;
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: token,
close: false,
error: false,
});
}
if (message.finish_reason !== null) {
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
response.removeListener("close", handleAbort);
resolve(fullText);
}
}
} catch (e) {
writeResponseChunk(response, {
uuid,
sources,
type: "abort",
textResponse: null,
close: true,
error: e.message,
});
response.removeListener("close", handleAbort);
resolve(fullText);
}
});
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
async function fetchNovitaModels() {
return await fetch(`https://api.novita.ai/v3/openai/models`, {
method: "GET",
headers: {
"Content-Type": "application/json",
},
})
.then((res) => res.json())
.then(({ data = [] }) => {
const models = {};
data.forEach((model) => {
models[model.id] = {
id: model.id,
name: model.title,
organization:
model.id.split("/")[0].charAt(0).toUpperCase() +
model.id.split("/")[0].slice(1),
maxLength: model.context_size,
};
});
// Cache all response information
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
fs.writeFileSync(
path.resolve(cacheFolder, "models.json"),
JSON.stringify(models),
{
encoding: "utf-8",
}
);
fs.writeFileSync(
path.resolve(cacheFolder, ".cached_at"),
String(Number(new Date())),
{
encoding: "utf-8",
}
);
return models;
})
.catch((e) => {
console.error(e);
return {};
});
}
module.exports = {
NovitaLLM,
fetchNovitaModels,
};
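For reference, the cache written by fetchNovitaModels maps model ids to context metadata, roughly like this (entry hypothetical):

// Illustrative contents of storage/models/novita/models.json:
const exampleCache = {
  "gryphe/mythomax-l2-13b": {
    id: "gryphe/mythomax-l2-13b",
    name: "MythoMax L2 13B",
    organization: "Gryphe",
    maxLength: 4096,
  },
};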

View File

@ -1,3 +1,18 @@
/**
* @typedef {object} DocumentMetadata
* @property {string} id - eg; "123e4567-e89b-12d3-a456-426614174000"
* @property {string} url - eg; "file://example.com/index.html"
* @property {string} title - eg; "example.com/index.html"
* @property {string} docAuthor - eg; "no author found"
* @property {string} description - eg; "No description found."
* @property {string} docSource - eg; "URL link uploaded by the user."
* @property {string} chunkSource - eg; link://https://example.com
* @property {string} published - ISO 8601 date string
* @property {number} wordCount - Number of words in the document
* @property {string} pageContent - The raw text content of the document
* @property {number} token_count_estimate - Number of tokens in the document
*/
function isNullOrNaN(value) {
if (value === null) return true;
return isNaN(value);
@ -29,10 +44,12 @@ class TextSplitter {
console.log(`\x1b[35m[TextSplitter]\x1b[0m ${text}`, ...args);
}
// Does a quick check to determine the text chunk length limit.
// Embedder models have hard-set limits that cannot be exceeded, just like an LLM context
// so here we want to allow override of the default 1000, but up to the model's maximum, which is
// sometimes user defined.
/**
* Does a quick check to determine the text chunk length limit.
* Embedder models have hard-set limits that cannot be exceeded, just like an LLM context
* so here we want to allow override of the default 1000, but up to the model's maximum, which is
* sometimes user defined.
*/
static determineMaxChunkSize(preferred = null, embedderLimit = 1000) {
const prefValue = isNullOrNaN(preferred)
? Number(embedderLimit)
@ -45,6 +62,70 @@ class TextSplitter {
return prefValue > limit ? limit : prefValue;
}
/**
* Builds an object of metadata to be prepended to each chunk.
* @param {DocumentMetadata} metadata - Metadata to be prepended to each chunk.
* @returns {{[key: ('title' | 'published' | 'source')]: string}} Object of metadata that will be prepended to each chunk.
*/
static buildHeaderMeta(metadata = {}) {
if (!metadata || Object.keys(metadata).length === 0) return null;
const PLUCK_MAP = {
title: {
as: "sourceDocument",
pluck: (metadata) => {
return metadata?.title || null;
},
},
published: {
as: "published",
pluck: (metadata) => {
return metadata?.published || null;
},
},
chunkSource: {
as: "source",
pluck: (metadata) => {
const validPrefixes = ["link://", "youtube://"];
// If the chunkSource is a link or youtube link, we can add the URL
// as its source in the metadata so the LLM can use it for context.
// eg prompt: Where did you get this information? -> answer: "from https://example.com"
if (
!metadata?.chunkSource || // Exists
!metadata?.chunkSource.length || // Is not empty
typeof metadata.chunkSource !== "string" || // Is a string
!validPrefixes.some(
(prefix) => metadata.chunkSource.startsWith(prefix) // Has a valid prefix we respect
)
)
return null;
// We know a prefix is present, so we can split on it and return the rest.
// If nothing is found, return null and it will not be added to the metadata.
let source = null;
for (const prefix of validPrefixes) {
source = metadata.chunkSource.split(prefix)?.[1] || null;
if (source) break;
}
return source;
},
},
};
const pluckedData = {};
Object.entries(PLUCK_MAP).forEach(([key, value]) => {
if (!(key in metadata)) return; // Skip if the metadata key is not present.
const pluckedValue = value.pluck(metadata);
if (!pluckedValue) return; // Skip if the plucked value is null/empty.
pluckedData[value.as] = pluckedValue;
});
return pluckedData;
}
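A minimal sketch of buildHeaderMeta's input and output (values hypothetical):

// Hypothetical input/output for buildHeaderMeta:
const headerMeta = TextSplitter.buildHeaderMeta({
  title: "example.com/index.html",
  published: "2024-11-06T00:00:00Z",
  chunkSource: "link://https://example.com",
});
// headerMeta => {
//   sourceDocument: "example.com/index.html",
//   published: "2024-11-06T00:00:00Z",
//   source: "https://example.com",
// }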
/**
* Creates a string of metadata to be prepended to each chunk.
*/
stringifyHeader() {
if (!this.config.chunkHeaderMeta) return null;
let content = "";

View File

@ -791,6 +791,8 @@ ${this.getHistory({ to: route.to })
return new Providers.ApiPieProvider({ model: config.model });
case "xai":
return new Providers.XAIProvider({ model: config.model });
case "novita":
return new Providers.NovitaProvider({ model: config.model });
default:
throw new Error(

View File

@ -80,6 +80,9 @@ const webBrowsing = {
case "tavily-search":
engine = "_tavilySearch";
break;
case "duckduckgo-engine":
engine = "_duckDuckGoEngine";
break;
default:
engine = "_googleSearchEngine";
}
@ -499,6 +502,66 @@ const webBrowsing = {
);
return JSON.stringify(data);
},
_duckDuckGoEngine: async function (query) {
this.super.introspect(
`${this.caller}: Using DuckDuckGo to search for "${
query.length > 100 ? `${query.slice(0, 100)}...` : query
}"`
);
const searchURL = new URL("https://html.duckduckgo.com/html");
searchURL.searchParams.append("q", query);
const response = await fetch(searchURL.toString());
if (!response.ok) {
return `There was an error searching DuckDuckGo. Status: ${response.status}`;
}
const html = await response.text();
const data = [];
const results = html.split('<div class="result results_links');
// Skip first element since it's before the first result
for (let i = 1; i < results.length; i++) {
const result = results[i];
// Extract title
const titleMatch = result.match(
/<a[^>]*class="result__a"[^>]*>(.*?)<\/a>/
);
const title = titleMatch ? titleMatch[1].trim() : "";
// Extract URL
const urlMatch = result.match(
/<a[^>]*class="result__a"[^>]*href="([^"]*)">/
);
const link = urlMatch ? urlMatch[1] : "";
// Extract snippet
const snippetMatch = result.match(
/<a[^>]*class="result__snippet"[^>]*>(.*?)<\/a>/
);
const snippet = snippetMatch
? snippetMatch[1].replace(/<\/?b>/g, "").trim()
: "";
if (title && link && snippet) {
data.push({ title, link, snippet });
}
}
if (data.length === 0) {
return `No information was found online for the search query.`;
}
this.super.introspect(
`${this.caller}: I found ${data.length} results - looking over them now.`
);
return JSON.stringify(data);
},
});
},
};
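For reference, a successful DuckDuckGo search resolves to a JSON string of result objects, roughly (values hypothetical):

// Illustrative return value of _duckDuckGoEngine on success:
const exampleResult = JSON.stringify([
  { title: "Example Domain", link: "https://example.com", snippet: "Example snippet text." },
]);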

View File

@ -206,6 +206,14 @@ class Provider {
apiKey: process.env.LITE_LLM_API_KEY ?? null,
...config,
});
case "novita":
return new ChatOpenAI({
configuration: {
baseURL: "https://api.novita.ai/v3/openai",
},
apiKey: process.env.NOVITA_LLM_API_KEY ?? null,
...config,
});
default:
throw new Error(`Unsupported provider ${provider} for this task.`);

View File

@ -22,6 +22,11 @@ class AWSBedrockProvider extends InheritMultiple([Provider, UnTooled]) {
credentials: {
accessKeyId: process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID,
secretAccessKey: process.env.AWS_BEDROCK_LLM_ACCESS_KEY,
// If we're using a session token, we need to pass it in as a credential
// otherwise we must omit it so it does not conflict if using IAM auth
...(this.authMethod === "sessionToken"
? { sessionToken: process.env.AWS_BEDROCK_LLM_SESSION_TOKEN }
: {}),
},
model,
});
@ -31,6 +36,17 @@ class AWSBedrockProvider extends InheritMultiple([Provider, UnTooled]) {
this.verbose = true;
}
/**
* Get the authentication method for the AWS Bedrock LLM.
* There are only two valid values for this setting - anything else will default to "iam".
* @returns {"iam"|"sessionToken"}
*/
get authMethod() {
const method = process.env.AWS_BEDROCK_LLM_CONNECTION_METHOD || "iam";
if (!["iam", "sessionToken"].includes(method)) return "iam";
return method;
}
get client() {
return this._client;
}

View File

@ -18,6 +18,7 @@ const DeepSeekProvider = require("./deepseek.js");
const LiteLLMProvider = require("./litellm.js");
const ApiPieProvider = require("./apipie.js");
const XAIProvider = require("./xai.js");
const NovitaProvider = require("./novita.js");
module.exports = {
OpenAIProvider,
@ -40,4 +41,5 @@ module.exports = {
LiteLLMProvider,
ApiPieProvider,
XAIProvider,
NovitaProvider,
};

View File

@ -0,0 +1,115 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
/**
 * The agent provider for Novita AI.
*/
class NovitaProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(config = {}) {
const { model = "gryphe/mythomax-l2-13b" } = config;
super();
const client = new OpenAI({
baseURL: "https://api.novita.ai/v3/openai",
apiKey: process.env.NOVITA_LLM_API_KEY,
maxRetries: 3,
defaultHeaders: {
"HTTP-Referer": "https://anythingllm.com",
"X-Novita-Source": "anythingllm",
},
});
this._client = client;
this.model = model;
this.verbose = true;
}
get client() {
return this._client;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client.chat.completions
.create({
model: this.model,
temperature: 0,
messages,
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("Novita chat: No results!");
if (result.choices.length === 0)
throw new Error("Novita chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
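// Swallow errors and return null; the caller treats a null reply as a failed call.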
return null;
});
}
/**
* Create a completion based on the received messages.
*
* @param messages A list of messages to send to the API.
 * @param functions A list of function definitions the model may call (optional).
* @returns The completion.
*/
async complete(messages, functions = []) {
let completion;
if (functions.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog("Will assume chat completion without tool call inputs.");
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
// The Deduplicator inherited via the UnTooled class is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange,
// _but_ we should still allow it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return {
result: completion.content,
cost: 0,
};
}
/**
 * Get the cost of the completion.
 * Stubbed since Novita AI has no cost basis.
 *
 * @param _usage The completion usage to get the cost for.
 * @returns The cost of the completion - always 0.
 */
getCost(_usage) {
return 0;
}
}
module.exports = NovitaProvider;
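A minimal usage sketch, assuming NOVITA_LLM_API_KEY is exported and the default model is available (the message array follows the OpenAI chat format used above):

const NovitaProvider = require("./novita.js");

(async () => {
  const provider = new NovitaProvider(); // defaults to gryphe/mythomax-l2-13b
  // An empty functions array falls through to a plain chat completion.
  const { result, cost } = await provider.complete(
    [{ role: "user", content: "Say hello in one word." }],
    []
  );
  console.log(result, cost); // cost is always 0 - getCost() is stubbed.
})();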

View File

@ -20,19 +20,40 @@ const {
* not persist between invocations
*/
class EphemeralAgentHandler extends AgentHandler {
/** @type {string|null} the unique identifier for the agent invocation */
#invocationUUID = null;
/** @type {import("@prisma/client").workspaces|null} the workspace to use for the agent */
#workspace = null;
/** @type {import("@prisma/client").users|null} the user id to use for the agent */
#userId = null;
/** @type {import("@prisma/client").workspace_threads|null} the workspace thread id to use for the agent */
#threadId = null;
/** @type {string|null} the session id to use for the agent */
#sessionId = null;
/** @type {string|null} the prompt to use for the agent */
#prompt = null;
/** @type {string[]} the functions to load into the agent (Aibitat plugins) */
#funcsToLoad = [];
/** @type {AIbitat|null} */
aibitat = null;
/** @type {string|null} */
channel = null;
/** @type {string|null} */
provider = null;
/** @type {string|null} the model to use for the agent */
model = null;
/**
* @param {{
* uuid: string,
* workspace: import("@prisma/client").workspaces,
* prompt: string,
* userId: import("@prisma/client").users["id"]|null,
* threadId: import("@prisma/client").workspace_threads["id"]|null,
* sessionId: string|null
* }} parameters
*/
constructor({
uuid,
workspace,
@ -148,8 +169,7 @@ class EphemeralAgentHandler extends AgentHandler {
}
// The provider was explicitly set, so check if the workspace has an agent model set.
if (this.invocation.workspace.agentModel)
return this.invocation.workspace.agentModel;
if (this.#workspace.agentModel) return this.#workspace.agentModel;
// Otherwise, we have no model to use - so guess a default model via the provider
// and its system ENV params; if that fails, we return either a base model or null.

View File

@ -173,6 +173,10 @@ class AgentHandler {
if (!process.env.XAI_LLM_API_KEY)
throw new Error("xAI API Key must be provided to use agents.");
break;
case "novita":
if (!process.env.NOVITA_LLM_API_KEY)
throw new Error("Novita API Key must be provided to use agents.");
break;
default:
throw new Error(
@ -234,6 +238,8 @@ class AgentHandler {
return process.env.APIPIE_LLM_MODEL_PREF ?? null;
case "xai":
return process.env.XAI_LLM_MODEL_PREF ?? "grok-beta";
case "novita":
return process.env.NOVITA_LLM_MODEL_PREF ?? "gryphe/mythomax-l2-13b";
default:
return null;
}

View File

@ -11,7 +11,10 @@ const { v4 } = require("uuid");
// This line is only relevant for Render/Railway.
const RENDER_STORAGE = path.resolve(__dirname, `../../../collector/hotdir`);
// Handle File uploads for auto-uploading.
/**
* Handle File uploads for auto-uploading.
* Mostly used for internal GUI/API uploads.
*/
const fileUploadStorage = multer.diskStorage({
destination: function (_, __, cb) {
const uploadOutput = RENDER_STORAGE;
@ -28,6 +31,24 @@ const fileUploadStorage = multer.diskStorage({
},
});
/**
* Handle API file upload as documents - this does not manipulate the filename
 * at all, so the original encoding/charset of the name is preserved.
*/
const fileAPIUploadStorage = multer.diskStorage({
destination: function (_, __, cb) {
const uploadOutput = RENDER_STORAGE;
// const uploadOutput =
// process.env.NODE_ENV === "development"
// ? path.resolve(__dirname, `../../../collector/hotdir`)
// : path.resolve(process.env.STORAGE_DIR, `../../collector/hotdir`);
cb(null, uploadOutput);
},
filename: function (_, file, cb) {
cb(null, file.originalname);
},
});
// Asset storage for logos
const assetUploadStorage = multer.diskStorage({
destination: function (_, __, cb) {
@ -46,7 +67,9 @@ const assetUploadStorage = multer.diskStorage({
},
});
// Asset sub-storage manager for pfp icons.
/**
 * Handle PFP (profile picture) file uploads
*/
const pfpUploadStorage = multer.diskStorage({
destination: function (_, __, cb) {
const uploadOutput =
@ -63,7 +86,12 @@ const pfpUploadStorage = multer.diskStorage({
},
});
// Handle Generic file upload as documents
/**
* Handle Generic file upload as documents from the GUI
* @param {Request} request
* @param {Response} response
* @param {NextFunction} next
*/
function handleFileUpload(request, response, next) {
const upload = multer({ storage: fileUploadStorage }).single("file");
upload(request, response, function (err) {
@ -81,7 +109,33 @@ function handleFileUpload(request, response, next) {
});
}
// Handle logo asset uploads
/**
* Handle API file upload as documents - this does not manipulate the filename
 * at all, so the original encoding/charset of the name is preserved.
* @param {Request} request
* @param {Response} response
* @param {NextFunction} next
*/
function handleAPIFileUpload(request, response, next) {
const upload = multer({ storage: fileAPIUploadStorage }).single("file");
upload(request, response, function (err) {
if (err) {
response
.status(500)
.json({
success: false,
error: `Invalid file upload. ${err.message}`,
})
.end();
return;
}
next();
});
}
/**
* Handle logo asset uploads
*/
function handleAssetUpload(request, response, next) {
const upload = multer({ storage: assetUploadStorage }).single("logo");
upload(request, response, function (err) {
@ -99,7 +153,9 @@ function handleAssetUpload(request, response, next) {
});
}
// Handle PFP file upload as logos
/**
 * Handle PFP (profile picture) file uploads
*/
function handlePfpUpload(request, response, next) {
const upload = multer({ storage: pfpUploadStorage }).single("file");
upload(request, response, function (err) {
@ -119,6 +175,7 @@ function handlePfpUpload(request, response, next) {
module.exports = {
handleFileUpload,
handleAPIFileUpload,
handleAssetUpload,
handlePfpUpload,
};
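A hedged wiring sketch for the new middleware (the route path, require path, and response body are assumptions; only the middleware itself comes from this file):

const express = require("express");
const { handleAPIFileUpload } = require("./utils/files/multer"); // assumed path

const app = express();
// Unlike the GUI route, the API route preserves file.originalname byte-for-byte,
// so documents with non-ASCII filenames land in the collector hotdir unmangled.
app.post("/v1/document/upload", handleAPIFileUpload, (request, response) => {
  response.status(200).json({ success: true, file: request.file.originalname });
});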

View File

@ -4,6 +4,7 @@ const { perplexityModels } = require("../AiProviders/perplexity");
const { togetherAiModels } = require("../AiProviders/togetherAi");
const { fireworksAiModels } = require("../AiProviders/fireworksAi");
const { ElevenLabsTTS } = require("../TextToSpeech/elevenLabs");
const { fetchNovitaModels } = require("../AiProviders/novita");
const SUPPORT_CUSTOM_MODELS = [
"openai",
"localai",
@ -21,6 +22,7 @@ const SUPPORT_CUSTOM_MODELS = [
"groq",
"deepseek",
"apipie",
"novita",
"xai",
];
@ -61,6 +63,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await getDeepSeekModels(apiKey);
case "apipie":
return await getAPIPieModels(apiKey);
case "novita":
return await getNovitaModels();
case "xai":
return await getXAIModels(apiKey);
default:
@ -362,6 +366,20 @@ async function getOpenRouterModels() {
return { models, error: null };
}
async function getNovitaModels() {
const knownModels = await fetchNovitaModels();
if (Object.keys(knownModels).length === 0)
return { models: [], error: null };
const models = Object.values(knownModels).map((model) => {
return {
id: model.id,
organization: model.organization,
name: model.name,
};
});
return { models, error: null };
}
async function getAPIPieModels(apiKey = null) {
const knownModels = await fetchApiPieModels(apiKey);
if (Object.keys(knownModels).length === 0)

View File

@ -165,6 +165,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "apipie":
const { ApiPieLLM } = require("../AiProviders/apipie");
return new ApiPieLLM(embedder, model);
case "novita":
const { NovitaLLM } = require("../AiProviders/novita");
return new NovitaLLM(embedder, model);
case "xai":
const { XAiLLM } = require("../AiProviders/xai");
return new XAiLLM(embedder, model);
@ -297,6 +300,9 @@ function getLLMProviderClass({ provider = null } = {}) {
case "apipie":
const { ApiPieLLM } = require("../AiProviders/apipie");
return ApiPieLLM;
case "novita":
const { NovitaLLM } = require("../AiProviders/novita");
return NovitaLLM;
case "xai":
const { XAiLLM } = require("../AiProviders/xai");
return XAiLLM;

View File

@ -213,6 +213,13 @@ const KEY_MAPPING = {
},
// AWS Bedrock LLM InferenceSettings
AwsBedrockLLMConnectionMethod: {
envKey: "AWS_BEDROCK_LLM_CONNECTION_METHOD",
checks: [
(input) =>
["iam", "sessionToken"].includes(input) ? null : "Invalid value",
],
},
AwsBedrockLLMAccessKeyId: {
envKey: "AWS_BEDROCK_LLM_ACCESS_KEY_ID",
checks: [isNotEmpty],
@ -221,6 +228,10 @@ const KEY_MAPPING = {
envKey: "AWS_BEDROCK_LLM_ACCESS_KEY",
checks: [isNotEmpty],
},
AwsBedrockLLMSessionToken: {
envKey: "AWS_BEDROCK_LLM_SESSION_TOKEN",
checks: [],
},
AwsBedrockLLMRegion: {
envKey: "AWS_BEDROCK_LLM_REGION",
checks: [isNotEmpty],
@ -384,6 +395,20 @@ const KEY_MAPPING = {
checks: [],
},
// Novita Options
NovitaLLMApiKey: {
envKey: "NOVITA_LLM_API_KEY",
checks: [isNotEmpty],
},
NovitaLLMModelPref: {
envKey: "NOVITA_LLM_MODEL_PREF",
checks: [isNotEmpty],
},
NovitaLLMTimeout: {
envKey: "NOVITA_LLM_TIMEOUT_MS",
checks: [],
},
// Groq Options
GroqApiKey: {
envKey: "GROQ_API_KEY",
@ -644,6 +669,7 @@ function supportedLLM(input = "") {
"huggingface",
"perplexity",
"openrouter",
"novita",
"groq",
"koboldcpp",
"textgenwebui",
@ -698,9 +724,13 @@ function validAnthropicModel(input = "") {
"claude-instant-1.2",
"claude-2.0",
"claude-2.1",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-haiku-20240307",
"claude-3-sonnet-20240229",
"claude-3-opus-latest",
"claude-3-5-haiku-latest",
"claude-3-5-haiku-20241022",
"claude-3-5-sonnet-latest",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-20240620",
];
return validModels.includes(input)
@ -899,6 +929,8 @@ function dumpENV() {
"HTTPS_KEY_PATH",
// Other Configuration Keys
"DISABLE_VIEW_CHAT_HISTORY",
// Simple SSO
"SIMPLE_SSO_ENABLED",
];
// Simple sanitization of each value to prevent ENV injection via newline or quote escaping.

View File

@ -0,0 +1,39 @@
const { SystemSettings } = require("../../models/systemSettings");
/**
* Checks if simple SSO is enabled for issuance of temporary auth tokens.
* Note: This middleware must be called after `validApiKey`.
* @param {import("express").Request} request
* @param {import("express").Response} response
* @param {import("express").NextFunction} next
* @returns {void}
*/
async function simpleSSOEnabled(_, response, next) {
if (!("SIMPLE_SSO_ENABLED" in process.env)) {
return response
.status(403)
.send(
"Simple SSO is not enabled. It must be enabled to validate or issue temporary auth tokens."
);
}
// If the multi-user mode response local is not set, we need to check if it's enabled.
if (!("multiUserMode" in response.locals)) {
const multiUserMode = await SystemSettings.isMultiUserMode();
response.locals.multiUserMode = multiUserMode;
}
if (!response.locals.multiUserMode) {
return response
.status(403)
.send(
"Multi-User mode is not enabled. It must be enabled to use Simple SSO."
);
}
next();
}
module.exports = {
simpleSSOEnabled,
};
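A minimal sketch of the required ordering (the endpoint, require paths, and handler are hypothetical; the validApiKey-first chain is the only part prescribed by the note above):

const express = require("express");
const { validApiKey } = require("./validApiKey"); // assumed sibling middleware
const { simpleSSOEnabled } = require("./simpleSSOEnabled");

const app = express();
// validApiKey must run first so auth and response.locals are settled before
// simpleSSOEnabled checks SIMPLE_SSO_ENABLED and multi-user mode.
app.get(
  "/v1/users/:id/issue-auth-token", // hypothetical endpoint
  [validApiKey, simpleSSOEnabled],
  (_, response) => response.status(200).json({ token: "temporary-token" })
);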

View File

@ -160,10 +160,7 @@ const AstraDB = {
{ label: "text_splitter_chunk_overlap" },
20
),
chunkHeaderMeta: {
sourceDocument: metadata?.title,
published: metadata?.published || "unknown",
},
chunkHeaderMeta: TextSplitter.buildHeaderMeta(metadata),
});
const textChunks = await textSplitter.splitText(pageContent);
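Each vector DB below makes the same swap: the copy-pasted inline object becomes a single TextSplitter.buildHeaderMeta call. A sketch of what that static helper presumably returns, assuming it mirrors the inline object it replaces:

// Hypothetical helper - inferred from the removed inline object, not the real code.
class TextSplitter {
  static buildHeaderMeta(metadata = {}) {
    if (!metadata || Object.keys(metadata).length === 0) return null;
    return {
      sourceDocument: metadata?.title,
      published: metadata?.published || "unknown",
    };
  }
}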

View File

@ -251,10 +251,7 @@ const Chroma = {
{ label: "text_splitter_chunk_overlap" },
20
),
chunkHeaderMeta: {
sourceDocument: metadata?.title,
published: metadata?.published || "unknown",
},
chunkHeaderMeta: TextSplitter.buildHeaderMeta(metadata),
});
const textChunks = await textSplitter.splitText(pageContent);

View File

@ -240,10 +240,7 @@ const LanceDb = {
{ label: "text_splitter_chunk_overlap" },
20
),
chunkHeaderMeta: {
sourceDocument: metadata?.title,
published: metadata?.published || "unknown",
},
chunkHeaderMeta: TextSplitter.buildHeaderMeta(metadata),
});
const textChunks = await textSplitter.splitText(pageContent);

View File

@ -203,10 +203,7 @@ const Milvus = {
{ label: "text_splitter_chunk_overlap" },
20
),
chunkHeaderMeta: {
sourceDocument: metadata?.title,
published: metadata?.published || "unknown",
},
chunkHeaderMeta: TextSplitter.buildHeaderMeta(metadata),
});
const textChunks = await textSplitter.splitText(pageContent);

View File

@ -146,10 +146,7 @@ const PineconeDB = {
{ label: "text_splitter_chunk_overlap" },
20
),
chunkHeaderMeta: {
sourceDocument: metadata?.title,
published: metadata?.published || "unknown",
},
chunkHeaderMeta: TextSplitter.buildHeaderMeta(metadata),
});
const textChunks = await textSplitter.splitText(pageContent);

View File

@ -222,10 +222,7 @@ const QDrant = {
{ label: "text_splitter_chunk_overlap" },
20
),
chunkHeaderMeta: {
sourceDocument: metadata?.title,
published: metadata?.published || "unknown",
},
chunkHeaderMeta: TextSplitter.buildHeaderMeta(metadata),
});
const textChunks = await textSplitter.splitText(pageContent);

View File

@ -262,10 +262,7 @@ const Weaviate = {
{ label: "text_splitter_chunk_overlap" },
20
),
chunkHeaderMeta: {
sourceDocument: metadata?.title,
published: metadata?.published || "unknown",
},
chunkHeaderMeta: TextSplitter.buildHeaderMeta(metadata),
});
const textChunks = await textSplitter.splitText(pageContent);

View File

@ -196,10 +196,7 @@ const Zilliz = {
{ label: "text_splitter_chunk_overlap" },
20
),
chunkHeaderMeta: {
sourceDocument: metadata?.title,
published: metadata?.published || "unknown",
},
chunkHeaderMeta: TextSplitter.buildHeaderMeta(metadata),
});
const textChunks = await textSplitter.splitText(pageContent);

View File

@ -10,25 +10,24 @@
lodash.assignwith "^4.2.0"
typical "^7.1.1"
"@anthropic-ai/sdk@^0.16.1":
version "0.16.1"
resolved "https://registry.yarnpkg.com/@anthropic-ai/sdk/-/sdk-0.16.1.tgz#7472c42389d9a5323c20afa53995e1c3b922b95d"
integrity sha512-vHgvfWEyFy5ktqam56Nrhv8MVa7EJthsRYNi+1OrFFfyrj9tR2/aji1QbVbQjYU/pPhPFaYrdCEC/MLPFrmKwA==
"@anthropic-ai/sdk@^0.20.1":
version "0.20.9"
resolved "https://registry.yarnpkg.com/@anthropic-ai/sdk/-/sdk-0.20.9.tgz#f7c983861774e1595d5941a4d02774b7aa87c780"
integrity sha512-Lq74+DhiEQO6F9/gdVOLmHx57pX45ebK2Q/zH14xYe1157a7QeUVknRqIp0Jz5gQI01o7NKbuv9Dag2uQsLjDg==
dependencies:
"@types/node" "^18.11.18"
"@types/node-fetch" "^2.6.4"
abort-controller "^3.0.0"
agentkeepalive "^4.2.1"
digest-fetch "^1.3.0"
form-data-encoder "1.7.2"
formdata-node "^4.3.2"
node-fetch "^2.6.7"
web-streams-polyfill "^3.2.1"
"@anthropic-ai/sdk@^0.20.1":
version "0.20.8"
resolved "https://registry.yarnpkg.com/@anthropic-ai/sdk/-/sdk-0.20.8.tgz#310fbf9110c61032ed55863525607a327f94c328"
integrity sha512-dTMDrWYIFyoSr9P0b/gT2Nu1scBuEq4LU9SGX901ktP4aQxs2jiSWq6A80pRmVxyjFl3ngFvcOmVVrP0NHNhOg==
"@anthropic-ai/sdk@^0.32.1":
version "0.32.1"
resolved "https://registry.yarnpkg.com/@anthropic-ai/sdk/-/sdk-0.32.1.tgz#d22c8ebae2adccc59d78fb416e89de337ff09014"
integrity sha512-U9JwTrDvdQ9iWuABVsMLj8nJVwAyQz6QXvgLsVhryhCEPkLsbcP/MXxm+jYcAwLoV8ESbaTTjnD4kuAFa+Hyjg==
dependencies:
"@types/node" "^18.11.18"
"@types/node-fetch" "^2.6.4"
@ -37,7 +36,6 @@
form-data-encoder "1.7.2"
formdata-node "^4.3.2"
node-fetch "^2.6.7"
web-streams-polyfill "^3.2.1"
"@anthropic-ai/sdk@^0.9.1":
version "0.9.1"
@ -3836,9 +3834,9 @@ fast-xml-parser@4.2.5:
strnum "^1.0.5"
fast-xml-parser@^4.3.5:
version "4.3.6"
resolved "https://registry.yarnpkg.com/fast-xml-parser/-/fast-xml-parser-4.3.6.tgz#190f9d99097f0c8f2d3a0e681a10404afca052ff"
integrity sha512-M2SovcRxD4+vC493Uc2GZVcZaj66CCJhWurC4viynVSTvrpErCShNcDz1lAho6n9REQKvL/ll4A4/fw6Y9z8nw==
version "4.5.0"
resolved "https://registry.yarnpkg.com/fast-xml-parser/-/fast-xml-parser-4.5.0.tgz#2882b7d01a6825dfdf909638f2de0256351def37"
integrity sha512-/PlTQCI96+fZMAOLMZK4CWG1ItCbfZ/0jx7UIJFChPNrx7tcEgerUgWbeieCM9MfHInUDyK8DWYZ+YrywDJuTg==
dependencies:
strnum "^1.0.5"