merge with master

timothycarambat 2024-09-25 14:27:30 -07:00
commit cdfc83e38d
76 changed files with 1901 additions and 259 deletions

View File

@ -47,6 +47,7 @@
"streamable",
"textgenwebui",
"togetherai",
"fireworksai",
"Unembed",
"vectordbs",
"Weaviate",

View File

@ -55,7 +55,8 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
## Cool features of AnythingLLM
- 🆕 **Multi-modal support (both closed and open-source LLMs!)**
- 🆕 [**Custom AI Agents**](https://docs.anythingllm.com/agent/custom/introduction)
- 🖼️ **Multi-modal support (both closed and open-source LLMs!)**
- 👤 Multi-user instance support and permissioning _Docker version only_
- 🦾 Agents inside your workspace (browse the web, run code, etc)
- 💬 [Custom Embeddable Chat widget for your website](./embed/README.md) _Docker version only_
@ -83,6 +84,7 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
- [LM Studio (all models)](https://lmstudio.ai)
- [LocalAi (all models)](https://localai.io/)
- [Together AI (chat models)](https://www.together.ai/)
- [Fireworks AI (chat models)](https://fireworks.ai/)
- [Perplexity (chat models)](https://www.perplexity.ai/)
- [OpenRouter (chat models)](https://openrouter.ai/)
- [Mistral](https://mistral.ai/)

View File

@ -58,10 +58,7 @@ async function getPageContent(link) {
launchOptions: {
headless: "new",
ignoreHTTPSErrors: true,
args: [
'--no-sandbox',
'--disable-setuid-sandbox'
]
args: ["--no-sandbox", "--disable-setuid-sandbox"],
},
gotoOptions: {
waitUntil: "domcontentloaded",

View File

@ -199,9 +199,8 @@ async function fetchConfluencePage({
*/
function generateAPIBaseUrl(matchResult = {}, isCustomDomain = false) {
const { subdomain } = matchResult;
let subpath = isCustomDomain ? `` : `/wiki`;
if (isCustomDomain) return `https://${customDomain}${subpath}`;
return `https://${subdomain}.atlassian.net${subpath}`;
if (isCustomDomain) return `https://${subdomain}`;
return `https://${subdomain}.atlassian.net/wiki`;
}
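The removed version referenced an undefined `customDomain` variable in the custom-domain branch; the rewrite derives both return values from `subdomain` alone. A minimal sketch of the corrected behavior, with hypothetical domains:

```js
// Custom/self-hosted domains resolve to the bare host...
generateAPIBaseUrl({ subdomain: "wiki.example.com" }, true);
// => "https://wiki.example.com"

// ...while Atlassian cloud subdomains get the standard /wiki base path.
generateAPIBaseUrl({ subdomain: "mycompany" }, false);
// => "https://mycompany.atlassian.net/wiki"
```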
/**

View File

@ -44,16 +44,16 @@ class GitLabRepoLoader {
#validGitlabUrl() {
const UrlPattern = require("url-pattern");
const validPatterns = [
new UrlPattern("https\\://gitlab.com/(:projectId(*))", {
segmentValueCharset: "a-zA-Z0-9-._~%/+",
new UrlPattern("https\\://gitlab.com/(:author*)/(:project(*))", {
segmentValueCharset: "a-zA-Z0-9-._~%+",
}),
// This should even match the regular hosted URL, but we may want to know
// if this was a hosted GitLab (above) or a self-hosted (below) instance
// since the API interface could be different.
new UrlPattern(
"(:protocol(http|https))\\://(:hostname*)/(:projectId(*))",
"(:protocol(http|https))\\://(:hostname*)/(:author*)/(:project(*))",
{
segmentValueCharset: "a-zA-Z0-9-._~%/+",
segmentValueCharset: "a-zA-Z0-9-._~%+",
}
),
];
@ -64,9 +64,9 @@ class GitLabRepoLoader {
match = pattern.match(this.repo);
}
if (!match) return false;
const [author, project] = match.projectId.split("/");
const { author, project } = match;
this.projectId = encodeURIComponent(match.projectId);
this.projectId = encodeURIComponent(`${author}/${project}`);
this.apiBase = new URL(this.repo).origin;
this.author = author;
this.project = project;
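The URL patterns now capture `author` and `project` as separate segments (with `/` removed from the segment charset) and re-join them before encoding. For example, with a hypothetical repo URL:

```js
// "https://gitlab.com/gitlab-org/gitlab" now matches as
// { author: "gitlab-org", project: "gitlab" }, so:
const projectId = encodeURIComponent("gitlab-org/gitlab");
// => "gitlab-org%2Fgitlab", the URL-encoded ID the GitLab REST API expects
```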
@ -159,34 +159,55 @@ class GitLabRepoLoader {
async getRepoBranches() {
if (!this.#validGitlabUrl() || !this.projectId) return [];
await this.#validateAccessToken();
this.branches = [];
let fetching = true;
let page = 1;
let perPage = 50;
while (fetching) {
try {
this.branches = await fetch(
`${this.apiBase}/api/v4/projects/${this.projectId}/repository/branches`,
const params = new URLSearchParams({
per_page: perPage,
page,
});
const response = await fetch(
`${this.apiBase}/api/v4/projects/${
this.projectId
}/repository/branches?${params.toString()}`,
{
method: "GET",
headers: {
Accepts: "application/json",
...(this.accessToken ? { "PRIVATE-TOKEN": this.accessToken } : {}),
...(this.accessToken
? { "PRIVATE-TOKEN": this.accessToken }
: {}),
},
}
)
.then((res) => res.json())
.then((branches) => {
if (!Array.isArray(branches) || branches.length === 0) {
fetching = false;
return [];
}
return branches.map((b) => b.name);
})
.catch((e) => {
console.error(e);
fetching = false;
return [];
});
return this.#branchPrefSort(this.branches);
this.branches.push(...response);
page++;
} catch (err) {
console.log(`RepoLoader.getRepoBranches`, err);
this.branches = [];
fetching = false;
return [];
}
}
return this.#branchPrefSort(this.branches);
}
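Branch fetching is now paginated rather than done in a single request: each pass asks for one page of up to 50 branches, the loop stops on the first empty or non-array response, and only then is the branch-preference sort applied. A sketch of the request each iteration builds (`apiBase` and `projectId` values illustrative):

```js
// Shape of one paginated branches request:
const apiBase = "https://gitlab.com";
const projectId = "gitlab-org%2Fgitlab";
const params = new URLSearchParams({ per_page: 50, page: 1 });
const url = `${apiBase}/api/v4/projects/${projectId}/repository/branches?${params}`;
// => ".../api/v4/projects/gitlab-org%2Fgitlab/repository/branches?per_page=50&page=1"
```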
/**
* Returns list of all file objects from tree API for GitLab

View File

@ -101,6 +101,10 @@ GID='1000'
# AWS_BEDROCK_LLM_MODEL_PREFERENCE=meta.llama3-1-8b-instruct-v1:0
# AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT=8191
# LLM_PROVIDER='fireworksai'
# FIREWORKS_AI_LLM_API_KEY='my-fireworks-ai-key'
# FIREWORKS_AI_LLM_MODEL_PREF='accounts/fireworks/models/llama-v3p1-8b-instruct'
###########################################
######## Embedding API SELECTION ##########
###########################################

embed (submodule, 2 changes)

@ -1 +1 @@
Subproject commit 22a0848d58e3a758d85d93d9204a72a65854ea94
Subproject commit 6bd51d251ff1b204d7d88cdda0061df00676386e

File diff suppressed because one or more lines are too long

View File

@ -12,7 +12,7 @@ export default function AwsBedrockLLMOptions({ settings }) {
You should use a properly defined IAM user for inferencing.
<br />
<a
href="https://docs.useanything.com/setup/llm-configuration/cloud/aws-bedrock"
href="https://docs.anythingllm.com/setup/llm-configuration/cloud/aws-bedrock"
target="_blank"
className="underline flex gap-x-1 items-center"
>

View File

@ -0,0 +1,99 @@
import System from "@/models/system";
import { useState, useEffect } from "react";
export default function FireworksAiOptions({ settings }) {
return (
<div className="flex gap-[36px] mt-1.5">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
Fireworks AI API Key
</label>
<input
type="password"
name="FireworksAiLLMApiKey"
className="border-none bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
placeholder="Fireworks AI API Key"
defaultValue={settings?.FireworksAiLLMApiKey ? "*".repeat(20) : ""}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
{!settings?.credentialsOnly && (
<FireworksAiModelSelection settings={settings} />
)}
</div>
);
}
function FireworksAiModelSelection({ settings }) {
const [groupedModels, setGroupedModels] = useState({});
const [loading, setLoading] = useState(true);
useEffect(() => {
async function findCustomModels() {
setLoading(true);
const { models } = await System.customModels("fireworksai");
if (models?.length > 0) {
const modelsByOrganization = models.reduce((acc, model) => {
acc[model.organization] = acc[model.organization] || [];
acc[model.organization].push(model);
return acc;
}, {});
setGroupedModels(modelsByOrganization);
}
setLoading(false);
}
findCustomModels();
}, []);
if (loading || Object.keys(groupedModels).length === 0) {
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
Chat Model Selection
</label>
<select
name="FireworksAiLLMModelPref"
disabled={true}
className="border-none bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
<option disabled={true} selected={true}>
-- loading available models --
</option>
</select>
</div>
);
}
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
Chat Model Selection
</label>
<select
name="FireworksAiLLMModelPref"
required={true}
className="border-none bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{Object.keys(groupedModels)
.sort()
.map((organization) => (
<optgroup key={organization} label={organization}>
{groupedModels[organization].map((model) => (
<option
key={model.id}
value={model.id}
selected={settings?.FireworksAiLLMModelPref === model.id}
>
{model.name}
</option>
))}
</optgroup>
))}
</select>
</div>
);
}

View File

@ -0,0 +1,79 @@
import { useRef, useEffect } from "react";
export default function ContextMenu({
contextMenu,
closeContextMenu,
files,
selectedItems,
setSelectedItems,
}) {
const contextMenuRef = useRef(null);
useEffect(() => {
const handleClickOutside = (event) => {
if (
contextMenuRef.current &&
!contextMenuRef.current.contains(event.target)
) {
closeContextMenu();
}
};
document.addEventListener("mousedown", handleClickOutside);
return () => {
document.removeEventListener("mousedown", handleClickOutside);
};
}, [closeContextMenu]);
const isAllSelected = () => {
const allItems = files.items.flatMap((folder) => [
folder.name,
...folder.items.map((file) => file.id),
]);
return allItems.every((item) => selectedItems[item]);
};
const toggleSelectAll = () => {
if (isAllSelected()) {
setSelectedItems({});
} else {
const newSelectedItems = {};
files.items.forEach((folder) => {
newSelectedItems[folder.name] = true;
folder.items.forEach((file) => {
newSelectedItems[file.id] = true;
});
});
setSelectedItems(newSelectedItems);
}
closeContextMenu();
};
if (!contextMenu.visible) return null;
return (
<div
ref={contextMenuRef}
style={{
position: "fixed",
top: `${contextMenu.y}px`,
left: `${contextMenu.x}px`,
zIndex: 1000,
}}
className="bg-zinc-800 border border-zinc-700 rounded-md shadow-lg"
>
<button
onClick={toggleSelectAll}
className="block w-full text-left px-4 py-2 text-sm text-white hover:bg-zinc-700"
>
{isAllSelected() ? "Unselect All" : "Select All"}
</button>
<button
onClick={closeContextMenu}
className="block w-full text-left px-4 py-2 text-sm text-white hover:bg-zinc-700"
>
Cancel
</button>
</div>
);
}

View File

@ -12,6 +12,7 @@ import { useModal } from "@/hooks/useModal";
import NewFolderModal from "./NewFolderModal";
import debounce from "lodash.debounce";
import { filterFileSearchResults } from "./utils";
import ContextMenu from "./ContextMenu";
function Directory({
files,
@ -35,6 +36,11 @@ function Directory({
openModal: openFolderModal,
closeModal: closeFolderModal,
} = useModal();
const [contextMenu, setContextMenu] = useState({
visible: false,
x: 0,
y: 0,
});
useEffect(() => {
setAmountSelected(Object.keys(selectedItems).length);
@ -171,8 +177,18 @@ function Directory({
}, 500);
const filteredFiles = filterFileSearchResults(files, searchTerm);
const handleContextMenu = (event) => {
event.preventDefault();
setContextMenu({ visible: true, x: event.clientX, y: event.clientY });
};
const closeContextMenu = () => {
setContextMenu({ visible: false, x: 0, y: 0 });
};
return (
<div className="px-8 pb-8">
<div className="px-8 pb-8" onContextMenu={handleContextMenu}>
<div className="flex flex-col gap-y-6">
<div className="flex items-center justify-between w-[560px] px-5 relative">
<h3 className="text-white text-base font-bold">My Documents</h3>
@ -298,6 +314,13 @@ function Directory({
/>
</div>
)}
<ContextMenu
contextMenu={contextMenu}
closeContextMenu={closeContextMenu}
files={files}
selectedItems={selectedItems}
setSelectedItems={setSelectedItems}
/>
</div>
);
}

View File

@ -19,8 +19,13 @@ export default function WorkspaceFileRow({
fetchKeys,
hasChanges,
movedItems,
selected,
toggleSelection,
disableSelection,
setSelectedItems,
}) {
const onRemoveClick = async () => {
const onRemoveClick = async (e) => {
e.stopPropagation();
setLoading(true);
try {
@ -33,24 +38,49 @@ export default function WorkspaceFileRow({
} catch (error) {
console.error("Failed to remove document:", error);
}
setSelectedItems({});
setLoadingMessage("");
setLoading(false);
};
function toggleRowSelection(e) {
if (disableSelection) return;
e.stopPropagation();
toggleSelection();
}
function handleRowSelection(e) {
e.stopPropagation();
toggleSelection();
}
const isMovedItem = movedItems?.some((movedItem) => movedItem.id === item.id);
return (
<div
className={`items-center text-white/80 text-xs grid grid-cols-12 py-2 pl-3.5 pr-8 hover:bg-sky-500/20 cursor-pointer ${
isMovedItem ? "bg-green-800/40" : "file-row"
}`}
className={`items-center h-[34px] text-white/80 text-xs grid grid-cols-12 py-2 pl-3.5 pr-8 ${
!disableSelection ? "hover:bg-sky-500/20 cursor-pointer" : ""
} ${isMovedItem ? "bg-green-800/40" : "file-row"} ${selected ? "selected" : ""}`}
onClick={toggleRowSelection}
>
<div
className="col-span-10 w-fit flex gap-x-[2px] items-center relative"
data-tooltip-id={`ws-directory-item-${item.url}`}
className="col-span-10 w-fit flex gap-x-[4px] items-center relative"
>
<div className="shrink-0 w-3 h-3">
{!disableSelection ? (
<div
className="w-full h-full rounded border-[1px] border-white flex justify-center items-center cursor-pointer"
role="checkbox"
aria-checked={selected}
tabIndex={0}
onClick={handleRowSelection}
>
{selected && <div className="w-2 h-2 bg-white rounded-[2px]" />}
</div>
) : null}
</div>
<File
className="shrink-0 text-base font-bold w-4 h-4 mr-[3px] ml-3"
className="shrink-0 text-base font-bold w-4 h-4 mr-[3px] ml-1"
weight="fill"
/>
<p className="whitespace-nowrap overflow-hidden text-ellipsis">
@ -105,8 +135,9 @@ const PinItemToWorkspace = memo(({ workspace, docPath, item }) => {
const [hover, setHover] = useState(false);
const pinEvent = new CustomEvent("pinned_document");
const updatePinStatus = async () => {
const updatePinStatus = async (e) => {
try {
e.stopPropagation();
if (!pinned) window.dispatchEvent(pinEvent);
const success = await Workspace.setPinForDocument(
workspace.slug,

View File

@ -7,6 +7,7 @@ import { Eye, PushPin } from "@phosphor-icons/react";
import { SEEN_DOC_PIN_ALERT, SEEN_WATCH_ALERT } from "@/utils/constants";
import paths from "@/utils/paths";
import { Link } from "react-router-dom";
import Workspace from "@/models/workspace";
function WorkspaceDirectory({
workspace,
@ -22,6 +23,66 @@ function WorkspaceDirectory({
embeddingCosts,
movedItems,
}) {
const [selectedItems, setSelectedItems] = useState({});
const toggleSelection = (item) => {
setSelectedItems((prevSelectedItems) => {
const newSelectedItems = { ...prevSelectedItems };
if (newSelectedItems[item.id]) {
delete newSelectedItems[item.id];
} else {
newSelectedItems[item.id] = true;
}
return newSelectedItems;
});
};
const toggleSelectAll = () => {
const allItems = files.items.flatMap((folder) => folder.items);
const allSelected = allItems.every((item) => selectedItems[item.id]);
if (allSelected) {
setSelectedItems({});
} else {
const newSelectedItems = {};
allItems.forEach((item) => {
newSelectedItems[item.id] = true;
});
setSelectedItems(newSelectedItems);
}
};
const removeSelectedItems = async () => {
setLoading(true);
setLoadingMessage("Removing selected files from workspace");
const itemsToRemove = Object.keys(selectedItems).map((itemId) => {
const folder = files.items.find((f) =>
f.items.some((i) => i.id === itemId)
);
const item = folder.items.find((i) => i.id === itemId);
return `${folder.name}/${item.name}`;
});
try {
await Workspace.modifyEmbeddings(workspace.slug, {
adds: [],
deletes: itemsToRemove,
});
await fetchKeys(true);
setSelectedItems({});
} catch (error) {
console.error("Failed to remove documents:", error);
}
setLoadingMessage("");
setLoading(false);
};
const handleSaveChanges = (e) => {
setSelectedItems({});
saveChanges(e);
};
if (loading) {
return (
<div className="px-8">
@ -31,11 +92,14 @@ function WorkspaceDirectory({
</h3>
</div>
<div className="relative w-[560px] h-[445px] bg-zinc-900 rounded-2xl mt-5">
<div className="text-white/80 text-xs grid grid-cols-12 py-2 px-8">
<p className="col-span-5">Name</p>
<div className="text-white/80 text-xs grid grid-cols-12 py-2 px-3.5 border-b border-white/20 bg-zinc-900 sticky top-0 z-10 rounded-t-2xl">
<div className="col-span-10 flex items-center gap-x-[4px]">
<div className="shrink-0 w-3 h-3" />
<p className="ml-[7px]">Name</p>
</div>
<p className="col-span-2" />
</div>
<div className="w-full h-full flex items-center justify-center flex-col gap-y-5">
<div className="w-full h-[calc(100%-40px)] flex items-center justify-center flex-col gap-y-5">
<PreLoader />
<p className="text-white/80 text-sm font-semibold animate-pulse text-center w-1/3">
{loadingMessage}
@ -54,24 +118,50 @@ function WorkspaceDirectory({
{workspace.name}
</h3>
</div>
<div className="relative w-[560px] h-[445px] mt-5">
<div
className={`relative w-[560px] h-[445px] bg-zinc-900 rounded-2xl mt-5 overflow-y-auto border-4 ${
highlightWorkspace ? "border-cyan-300/80" : "border-transparent"
className={`absolute inset-0 rounded-2xl ${
highlightWorkspace ? "border-4 border-cyan-300/80 z-[999]" : ""
}`}
/>
<div className="relative w-full h-full bg-zinc-900 rounded-2xl overflow-hidden">
<div className="text-white/80 text-xs grid grid-cols-12 py-2 px-3.5 border-b border-white/20 bg-zinc-900 sticky top-0 z-10">
<div className="col-span-10 flex items-center gap-x-[4px]">
{!hasChanges &&
files.items.some((folder) => folder.items.length > 0) ? (
<div
className="shrink-0 w-3 h-3 rounded border-[1px] border-white flex justify-center items-center cursor-pointer"
role="checkbox"
aria-checked={
Object.keys(selectedItems).length ===
files.items.reduce(
(sum, folder) => sum + folder.items.length,
0
)
}
tabIndex={0}
onClick={toggleSelectAll}
>
<div className="text-white/80 text-xs grid grid-cols-12 py-2 px-8 border-b border-white/20 bg-zinc-900 sticky top-0 z-10">
<p className="col-span-5">Name</p>
{Object.keys(selectedItems).length ===
files.items.reduce(
(sum, folder) => sum + folder.items.length,
0
) && <div className="w-2 h-2 bg-white rounded-[2px]" />}
</div>
) : (
<div className="shrink-0 w-3 h-3" />
)}
<p className="ml-[7px]">Name</p>
</div>
<p className="col-span-2" />
</div>
<div className="w-full h-full flex flex-col z-0">
{Object.values(files.items).some(
(folder) => folder.items.length > 0
) || movedItems.length > 0 ? (
<>
{files.items.map((folder) =>
folder.items.map((item, index) => (
<div className="overflow-y-auto h-[calc(100%-40px)]">
{files.items.some((folder) => folder.items.length > 0) ||
movedItems.length > 0 ? (
<RenderFileRows files={files} movedItems={movedItems}>
{({ item, folder }) => (
<WorkspaceFileRow
key={index}
key={item.id}
item={item}
folderName={folder.name}
workspace={workspace}
@ -80,10 +170,13 @@ function WorkspaceDirectory({
fetchKeys={fetchKeys}
hasChanges={hasChanges}
movedItems={movedItems}
selected={selectedItems[item.id]}
toggleSelection={() => toggleSelection(item)}
disableSelection={hasChanges}
setSelectedItems={setSelectedItems}
/>
))
)}
</>
</RenderFileRows>
) : (
<div className="w-full h-full flex items-center justify-center">
<p className="text-white text-opacity-40 text-sm font-medium">
@ -92,6 +185,33 @@ function WorkspaceDirectory({
</div>
)}
</div>
{Object.keys(selectedItems).length > 0 && !hasChanges && (
<div className="absolute bottom-[12px] left-0 right-0 flex justify-center pointer-events-none">
<div className="mx-auto bg-white/40 rounded-lg py-1 px-2 pointer-events-auto">
<div className="flex flex-row items-center gap-x-2">
<button
onClick={toggleSelectAll}
className="border-none text-sm font-semibold bg-white h-[30px] px-2.5 rounded-lg hover:text-white hover:bg-neutral-800/80"
>
{Object.keys(selectedItems).length ===
files.items.reduce(
(sum, folder) => sum + folder.items.length,
0
)
? "Deselect All"
: "Select All"}
</button>
<button
onClick={removeSelectedItems}
className="border-none text-sm font-semibold bg-white h-[30px] px-2.5 rounded-lg hover:text-white hover:bg-neutral-800/80"
>
Remove Selected
</button>
</div>
</div>
</div>
)}
</div>
</div>
{hasChanges && (
<div className="flex items-center justify-between py-6">
@ -111,7 +231,7 @@ function WorkspaceDirectory({
</div>
<button
onClick={saveChanges}
onClick={(e) => handleSaveChanges(e)}
className="border border-slate-200 px-5 py-2.5 rounded-lg text-white text-sm items-center flex gap-x-2 hover:bg-slate-200 hover:text-slate-800 focus:ring-gray-800"
>
Save and Embed
@ -258,4 +378,22 @@ const DocumentWatchAlert = memo(() => {
);
});
function RenderFileRows({ files, movedItems, children }) {
function sortMovedItemsAndFiles(a, b) {
const aIsMovedItem = movedItems.some((movedItem) => movedItem.id === a.id);
const bIsMovedItem = movedItems.some((movedItem) => movedItem.id === b.id);
if (aIsMovedItem && !bIsMovedItem) return -1;
if (!aIsMovedItem && bIsMovedItem) return 1;
return 0;
}
return files.items
.flatMap((folder) => folder.items)
.sort(sortMovedItemsAndFiles)
.map((item) => {
const folder = files.items.find((f) => f.items.includes(item));
return children({ item, folder });
});
}
export default memo(WorkspaceDirectory);

View File

@ -3,7 +3,9 @@ import System from "@/models/system";
export default function ElevenLabsOptions({ settings }) {
const [inputValue, setInputValue] = useState(settings?.TTSElevenLabsKey);
const [openAIKey, setOpenAIKey] = useState(settings?.TTSElevenLabsKey);
const [elevenLabsKey, setElevenLabsKey] = useState(
settings?.TTSElevenLabsKey
);
return (
<div className="flex gap-x-4">
@ -21,11 +23,11 @@ export default function ElevenLabsOptions({ settings }) {
autoComplete="off"
spellCheck={false}
onChange={(e) => setInputValue(e.target.value)}
onBlur={() => setOpenAIKey(inputValue)}
onBlur={() => setElevenLabsKey(inputValue)}
/>
</div>
{!settings?.credentialsOnly && (
<ElevenLabsModelSelection settings={settings} apiKey={openAIKey} />
<ElevenLabsModelSelection settings={settings} apiKey={elevenLabsKey} />
)}
</div>
);
@ -84,7 +86,6 @@ function ElevenLabsModelSelection({ apiKey, settings }) {
<select
name="TTSElevenLabsVoiceModel"
required={true}
defaultValue={settings?.TTSElevenLabsVoiceModel}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{Object.keys(groupedModels)
@ -92,7 +93,11 @@ function ElevenLabsModelSelection({ apiKey, settings }) {
.map((organization) => (
<optgroup key={organization} label={organization}>
{groupedModels[organization].map((model) => (
<option key={model.id} value={model.id}>
<option
key={model.id}
value={model.id}
selected={model.id === settings?.TTSElevenLabsVoiceModel}
>
{model.name}
</option>
))}

View File

@ -1,6 +1,6 @@
import React, { useEffect, useRef, useState } from "react";
import HistoricalMessage from "./HistoricalMessage";
import PromptReply from "./PromptReply";
import { useEffect, useRef, useState } from "react";
import { useManageWorkspaceModal } from "../../../Modals/ManageWorkspace";
import ManageWorkspace from "../../../Modals/ManageWorkspace";
import { ArrowDown } from "@phosphor-icons/react";
@ -10,6 +10,7 @@ import Chartable from "./Chartable";
import Workspace from "@/models/workspace";
import { useParams } from "react-router-dom";
import paths from "@/utils/paths";
import Appearance from "@/models/appearance";
export default function ChatHistory({
history = [],
@ -19,12 +20,16 @@ export default function ChatHistory({
regenerateAssistantMessage,
hasAttachments = false,
}) {
const lastScrollTopRef = useRef(0);
const { user } = useUser();
const { threadSlug = null } = useParams();
const { showing, showModal, hideModal } = useManageWorkspaceModal();
const [isAtBottom, setIsAtBottom] = useState(true);
const chatHistoryRef = useRef(null);
const [textSize, setTextSize] = useState("normal");
const [isUserScrolling, setIsUserScrolling] = useState(false);
const showScrollbar = Appearance.getSettings()?.showScrollbar || false;
const isStreaming = history[history.length - 1]?.animate;
const getTextSizeClass = (size) => {
switch (size) {
@ -56,35 +61,44 @@ export default function ChatHistory({
}, []);
useEffect(() => {
if (isAtBottom) scrollToBottom();
}, [history]);
if (!isUserScrolling && (isAtBottom || isStreaming)) {
scrollToBottom(false); // Use instant scroll for auto-scrolling
}
}, [history, isAtBottom, isStreaming, isUserScrolling]);
const handleScroll = (e) => {
const { scrollTop, scrollHeight, clientHeight } = e.target;
const isBottom = scrollHeight - scrollTop === clientHeight;
// Detect if this is a user-initiated scroll
if (Math.abs(scrollTop - lastScrollTopRef.current) > 10) {
setIsUserScrolling(!isBottom);
}
const handleScroll = () => {
const diff =
chatHistoryRef.current.scrollHeight -
chatHistoryRef.current.scrollTop -
chatHistoryRef.current.clientHeight;
// Fuzzy margin for what qualifies as "bottom". Stronger than straight comparison since that may change over time.
const isBottom = diff <= 10;
setIsAtBottom(isBottom);
lastScrollTopRef.current = scrollTop;
};
const debouncedScroll = debounce(handleScroll, 100);
useEffect(() => {
function watchScrollEvent() {
if (!chatHistoryRef.current) return null;
const chatHistoryElement = chatHistoryRef.current;
if (!chatHistoryElement) return null;
if (chatHistoryElement) {
chatHistoryElement.addEventListener("scroll", debouncedScroll);
return () =>
chatHistoryElement.removeEventListener("scroll", debouncedScroll);
}
watchScrollEvent();
}, []);
const scrollToBottom = () => {
const scrollToBottom = (smooth = false) => {
if (chatHistoryRef.current) {
chatHistoryRef.current.scrollTo({
top: chatHistoryRef.current.scrollHeight,
behavior: "smooth",
// Smooth is on when user clicks the button but disabled during auto scroll
// We must disable this during auto scroll because it causes issues with
// detecting when we are at the bottom of the chat.
...(smooth ? { behavior: "smooth" } : {}),
});
}
};
@ -190,9 +204,12 @@ export default function ChatHistory({
return (
<div
className={`markdown text-white/80 font-light ${textSize} h-full md:h-[83%] pb-[100px] pt-6 md:pt-0 md:pb-20 md:mx-0 overflow-y-scroll flex flex-col justify-start no-scroll`}
className={`markdown text-white/80 font-light ${textSize} h-full md:h-[83%] pb-[100px] pt-6 md:pt-0 md:pb-20 md:mx-0 overflow-y-scroll flex flex-col justify-start ${
showScrollbar ? "" : "no-scroll"
}`}
id="chat-history"
ref={chatHistoryRef}
onScroll={handleScroll}
>
{history.map((props, index) => {
const isLastBotReply =
@ -247,12 +264,14 @@ export default function ChatHistory({
{!isAtBottom && (
<div className="fixed bottom-40 right-10 md:right-20 z-50 cursor-pointer animate-pulse">
<div className="flex flex-col items-center">
<div className="p-1 rounded-full border border-white/10 bg-white/10 hover:bg-white/20 hover:text-white">
<ArrowDown
weight="bold"
className="text-white/60 w-5 h-5"
onClick={scrollToBottom}
/>
<div
className="p-1 rounded-full border border-white/10 bg-white/10 hover:bg-white/20 hover:text-white"
onClick={() => {
scrollToBottom(true);
setIsUserScrolling(false);
}}
>
<ArrowDown weight="bold" className="text-white/60 w-5 h-5" />
</div>
</div>
</div>

View File

@ -9,6 +9,7 @@ import useUser from "@/hooks/useUser";
export const DndUploaderContext = createContext();
export const REMOVE_ATTACHMENT_EVENT = "ATTACHMENT_REMOVE";
export const CLEAR_ATTACHMENTS_EVENT = "ATTACHMENT_CLEAR";
export const PASTE_ATTACHMENT_EVENT = "ATTACHMENT_PASTED";
/**
* File Attachment for automatic upload on the chat container page.
@ -36,10 +37,15 @@ export function DnDFileUploaderProvider({ workspace, children }) {
useEffect(() => {
window.addEventListener(REMOVE_ATTACHMENT_EVENT, handleRemove);
window.addEventListener(CLEAR_ATTACHMENTS_EVENT, resetAttachments);
window.addEventListener(PASTE_ATTACHMENT_EVENT, handlePastedAttachment);
return () => {
window.removeEventListener(REMOVE_ATTACHMENT_EVENT, handleRemove);
window.removeEventListener(CLEAR_ATTACHMENTS_EVENT, resetAttachments);
window.removeEventListener(
PASTE_ATTACHMENT_EVENT,
handlePastedAttachment
);
};
}, []);
@ -86,6 +92,39 @@ export function DnDFileUploaderProvider({ workspace, children }) {
);
}
/**
* Handle pasted attachments.
* @param {CustomEvent<{files: File[]}>} event
*/
async function handlePastedAttachment(event) {
const { files = [] } = event.detail;
if (!files.length) return;
const newAccepted = [];
for (const file of files) {
if (file.type.startsWith("image/")) {
newAccepted.push({
uid: v4(),
file,
contentString: await toBase64(file),
status: "success",
error: null,
type: "attachment",
});
} else {
newAccepted.push({
uid: v4(),
file,
contentString: null,
status: "in_progress",
error: null,
type: "upload",
});
}
}
setFiles((prev) => [...prev, ...newAccepted]);
embedEligibleAttachments(newAccepted);
}
/**
* Handle dropped files.
* @param {Attachment[]} acceptedFiles
@ -119,8 +158,15 @@ export function DnDFileUploaderProvider({ workspace, children }) {
}
setFiles((prev) => [...prev, ...newAccepted]);
embedEligibleAttachments(newAccepted);
}
for (const attachment of newAccepted) {
/**
* Embeds attachments that are eligible for embedding - basically files that are not images.
* @param {Attachment[]} newAttachments
*/
function embedEligibleAttachments(newAttachments = []) {
for (const attachment of newAttachments) {
// Images/attachments are chat specific.
if (attachment.type === "attachment") continue;
@ -200,7 +246,7 @@ export default function DnDFileUploaderWrapper({ children }) {
/**
* Convert image types into Base64 strings for requests.
* @param {File} file
* @returns {string}
* @returns {Promise<string>}
*/
async function toBase64(file) {
return new Promise((resolve, reject) => {

View File

@ -33,7 +33,8 @@ export default function AttachmentManager({ attachments }) {
* @param {{attachment: import("../../DnDWrapper").Attachment}}
*/
function AttachmentItem({ attachment }) {
const { uid, file, status, error, document, type } = attachment;
const { uid, file, status, error, document, type, contentString } =
attachment;
const { iconBgColor, Icon } = displayFromFile(file);
function removeFileFromQueue() {
@ -127,11 +128,18 @@ function AttachmentItem({ attachment }) {
/>
</button>
</div>
{contentString ? (
<img
src={contentString}
className={`${iconBgColor} w-[30px] h-[30px] rounded-lg flex items-center justify-center`}
/>
) : (
<div
className={`${iconBgColor} rounded-lg flex items-center justify-center flex-shrink-0 p-1`}
>
<Icon size={30} className="text-white" />
</div>
)}
<div className="flex flex-col w-[130px]">
<p className="text-white text-xs font-medium truncate">
{file.name}

View File

@ -15,6 +15,7 @@ import SpeechToText from "./SpeechToText";
import { Tooltip } from "react-tooltip";
import AttachmentManager from "./Attachments";
import AttachItem from "./AttachItem";
import { PASTE_ATTACHMENT_EVENT } from "../DnDWrapper";
export const PROMPT_INPUT_EVENT = "set_prompt_input";
export default function PromptInput({
@ -91,6 +92,39 @@ export default function PromptInput({
element.style.height = `${element.scrollHeight}px`;
};
const handlePasteEvent = (e) => {
e.preventDefault();
if (e.clipboardData.items.length === 0) return false;
// paste any clipboard items that are images.
for (const item of e.clipboardData.items) {
if (item.type.startsWith("image/")) {
const file = item.getAsFile();
window.dispatchEvent(
new CustomEvent(PASTE_ATTACHMENT_EVENT, {
detail: { files: [file] },
})
);
continue;
}
// handle files specifically that are not images as uploads
if (item.kind === "file") {
const file = item.getAsFile();
window.dispatchEvent(
new CustomEvent(PASTE_ATTACHMENT_EVENT, {
detail: { files: [file] },
})
);
continue;
}
}
const pasteText = e.clipboardData.getData("text/plain");
if (pasteText) setPromptInput((prev) => prev + pasteText.trim());
return;
};
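This is the dispatch side of the `PASTE_ATTACHMENT_EVENT` contract consumed by `DnDFileUploaderProvider` above: pasted images become base64 chat attachments, any other pasted file is queued as a document upload, and plain text is appended to the prompt. A condensed sketch of the event shape, where `pastedFile` stands in for `item.getAsFile()`:

```js
// What the provider's handlePastedAttachment receives in event.detail:
window.dispatchEvent(
  new CustomEvent(PASTE_ATTACHMENT_EVENT, {
    detail: { files: [pastedFile] }, // File objects pulled off the clipboard
  })
);
// image/* -> { type: "attachment", contentString: <base64 data URI> }
// others  -> { type: "upload" }, then embedded via embedEligibleAttachments()
```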
const watchForSlash = debounce(checkForSlash, 300);
const watchForAt = debounce(checkForAt, 300);
@ -125,6 +159,7 @@ export default function PromptInput({
setPromptInput(e.target.value);
}}
onKeyDown={captureEnter}
onPaste={handlePasteEvent}
required={true}
disabled={inputDisabled}
onFocus={() => setFocused(true)}

View File

@ -35,6 +35,7 @@ const PROVIDER_DEFAULT_MODELS = {
localai: [],
ollama: [],
togetherai: [],
fireworksai: [],
groq: [],
native: [],
cohere: [
@ -50,7 +51,7 @@ const PROVIDER_DEFAULT_MODELS = {
bedrock: [],
};
// For togetherAi, which has a large model list - we subgroup the options
// For providers with large model lists (e.g. togetherAi) - we subgroup the options
// by their creator organization (eg: Meta, Mistral, etc)
// which makes selection easier to read.
function groupModels(models) {
@ -61,7 +62,7 @@ function groupModels(models) {
}, {});
}
const groupedProviders = ["togetherai", "openai", "openrouter"];
const groupedProviders = ["togetherai", "fireworksai", "openai", "openrouter"];
export default function useGetProviderModels(provider = null) {
const [defaultModels, setDefaultModels] = useState([]);
const [customModels, setCustomModels] = useState([]);

View File

@ -421,6 +421,7 @@ const TRANSLATIONS = {
"embed-chats": {
title: "Eingebettete Chats",
export: "Exportieren",
description:
"Dies sind alle aufgezeichneten Chats und Nachrichten von jeder Einbettung, die Sie veröffentlicht haben.",
table: {

View File

@ -430,6 +430,7 @@ const TRANSLATIONS = {
"embed-chats": {
title: "Embed Chats",
export: "Export",
description:
"These are all the recorded chats and messages from any embed that you have published.",
table: {

View File

@ -424,6 +424,7 @@ const TRANSLATIONS = {
"embed-chats": {
title: "Incrustar chats",
export: "Exportar",
description:
"Estos son todos los chats y mensajes grabados de cualquier incrustación que hayas publicado.",
table: {

View File

@ -438,6 +438,7 @@ const TRANSLATIONS = {
"embed-chats": {
title: "Chats intégrés",
export: "Exporter",
description:
"Voici tous les chats et messages enregistrés de tout widget intégré que vous avez publié.",
table: {

View File

@ -422,6 +422,7 @@ const TRANSLATIONS = {
"embed-chats": {
title: "הטמעת שיחות",
export: "ייצוא",
description: "אלה כל השיחות וההודעות שנרשמו מכל הטמעה שפרסמת.",
table: {
embed: "הטמעה",

View File

@ -435,6 +435,7 @@ const TRANSLATIONS = {
"embed-chats": {
title: "Chat incorporate",
export: "Esporta",
description:
"Queste sono tutte le chat e i messaggi registrati da qualsiasi embedding che hai pubblicato.",
table: {

View File

@ -421,6 +421,7 @@ const TRANSLATIONS = {
"embed-chats": {
title: "임베드 채팅",
export: "내보내기",
description: "게시한 임베드에서의 모든 채팅과 메시지의 기록입니다.",
table: {
embed: "임베드",

View File

@ -433,6 +433,7 @@ const TRANSLATIONS = {
"embed-chats": {
title: "Incorporar Chats",
export: "Exportar",
description:
"Estes são todos os chats e mensagens registrados de qualquer incorporação que você publicou.",
table: {

View File

@ -405,6 +405,7 @@ const TRANSLATIONS = {
},
"embed-chats": {
title: "Встраивание чатов",
export: "Экспорт",
description:
"Это все записанные чаты и сообщения от любого встраивания, которое вы опубликовали.",
table: {

View File

@ -406,6 +406,7 @@ const TRANSLATIONS = {
// Embeddable Chat History
"embed-chats": {
title: "嵌入的聊天历史纪录",
export: "导出",
description: "这些是您发布的任何嵌入的所有记录的聊天和消息。",
table: {
embed: "嵌入",

Binary file not shown (new image added, 3.7 KiB).

View File

@ -0,0 +1,25 @@
import { APPEARANCE_SETTINGS } from "@/utils/constants";
const Appearance = {
/**
* Fetches any locally stored settings for the user
* @returns {{showScrollbar: boolean}}
*/
getSettings: () => {
const settings = localStorage.getItem(APPEARANCE_SETTINGS);
return settings ? JSON.parse(settings) : { showScrollbar: false };
},
/**
* Updates locally stored user settings
* @param {object} newSettings - new settings to update.
* @returns {object}
*/
updateSettings: (newSettings) => {
const updatedSettings = { ...Appearance.getSettings(), ...newSettings };
localStorage.setItem(APPEARANCE_SETTINGS, JSON.stringify(updatedSettings));
return updatedSettings;
},
};
export default Appearance;
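The model is a thin wrapper over `localStorage` keyed by `APPEARANCE_SETTINGS`, so reads and writes are synchronous and new settings merge shallowly into the stored object. Example usage:

```js
import Appearance from "@/models/appearance";

// Read current settings; falls back to { showScrollbar: false } when unset.
const { showScrollbar } = Appearance.getSettings();

// Shallow-merge a change, persist it, and get the updated object back.
const updated = Appearance.updateSettings({ showScrollbar: !showScrollbar });
```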

View File

@ -577,9 +577,10 @@ const System = {
return { success: false, error: e.message };
});
},
exportChats: async (type = "csv") => {
exportChats: async (type = "csv", chatType = "workspace") => {
const url = new URL(`${fullApiUrl()}/system/export-chats`);
url.searchParams.append("type", encodeURIComponent(type));
url.searchParams.append("chatType", encodeURIComponent(chatType));
return await fetch(url, {
method: "GET",
headers: baseHeaders(),
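`chatType` defaults to `"workspace"`, so existing callers are unchanged while the embed-chats page can request its own history. For example:

```js
// Workspace chats as CSV (previous behavior, still the default):
await System.exportChats("csv");

// Embed chats as JSONL; resolves to
// GET {fullApiUrl}/system/export-chats?type=jsonl&chatType=embed
await System.exportChats("jsonl", "embed");
```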

View File

@ -1,21 +1,26 @@
import React, { useState } from "react";
import React, { useEffect, useState } from "react";
import DBConnection from "./DBConnection";
import { Plus, Database } from "@phosphor-icons/react";
import NewSQLConnection from "./NewConnectionModal";
import { useModal } from "@/hooks/useModal";
import SQLAgentImage from "@/media/agents/sql-agent.png";
import Admin from "@/models/admin";
export default function AgentSQLConnectorSelection({
skill,
settings,
settings, // unused.
toggleSkill,
enabled = false,
setHasChanges,
}) {
const { isOpen, openModal, closeModal } = useModal();
const [connections, setConnections] = useState(
settings?.preferences?.agent_sql_connections || []
);
const [connections, setConnections] = useState([]);
useEffect(() => {
Admin.systemPreferencesByFields(["agent_sql_connections"])
.then((res) => setConnections(res?.settings?.agent_sql_connections ?? []))
.catch(() => setConnections([]));
}, []);
return (
<>
<div className="p-2">

View File

@ -1,4 +1,5 @@
import React, { useEffect, useRef, useState } from "react";
import Admin from "@/models/admin";
import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
import GoogleSearchIcon from "./icons/google.png";
import SearchApiIcon from "./icons/searchapi.png";
@ -119,8 +120,12 @@ export default function AgentWebSearchSelection({
}, [searchQuery, selectedProvider]);
useEffect(() => {
setSelectedProvider(settings?.preferences?.agent_search_provider ?? "none");
}, [settings?.preferences?.agent_search_provider]);
Admin.systemPreferencesByFields(["agent_search_provider"])
.then((res) =>
setSelectedProvider(res?.settings?.agent_search_provider ?? "none")
)
.catch(() => setSelectedProvider("none"));
}, []);
const selectedSearchProviderObject = SEARCH_PROVIDERS.find(
(provider) => provider.value === selectedProvider

View File

@ -70,7 +70,7 @@ export default function Confirmation({ settings, setSettings, setStep }) {
Below are your fine-tuning order details. If you have any questions
before or after ordering your fine-tune you can checkout the{" "}
<a
href="https://docs.useanything.com/fine-tuning/overview"
href="https://docs.anythingllm.com/fine-tuning/overview"
target="_blank"
rel="noreferrer"
className="underline text-sky-400"

View File

@ -49,7 +49,7 @@ export default function OrderPlaced({ settings }) {
<div className="flex flex-col items-left gap-x-4 text-xs">
<a
href="https://docs.useanything.com/fine-tuning/overview"
href="https://docs.anythingllm.com/fine-tuning/overview"
target="_blank"
rel="noreferrer"
className="text-sky-400 hover:underline hover:cursor-pointer"

View File

@ -0,0 +1,56 @@
import React, { useState, useEffect } from "react";
import Appearance from "@/models/appearance";
export default function ShowScrollbar() {
const [saving, setSaving] = useState(false);
const [showScrollbar, setShowScrollbar] = useState(false);
const handleChange = async (e) => {
const newValue = e.target.checked;
setShowScrollbar(newValue);
setSaving(true);
try {
Appearance.updateSettings({ showScrollbar: newValue });
} catch (error) {
console.error("Failed to update appearance settings:", error);
setShowScrollbar(!newValue);
}
setSaving(false);
};
useEffect(() => {
function fetchSettings() {
const settings = Appearance.getSettings();
setShowScrollbar(settings.showScrollbar);
}
fetchSettings();
}, []);
return (
<div className="flex flex-col w-full gap-y-4 mt-6">
<div className="flex flex-col gap-y-1">
<h2 className="text-base leading-6 font-bold text-white">
Show chat window scrollbar
</h2>
<p className="text-xs leading-[18px] font-base text-white/60">
Enable or disable the scrollbar in the chat window
</p>
<div className="mt-2">
<label className="relative inline-flex cursor-pointer items-center">
<input
id="show_scrollbar"
type="checkbox"
name="show_scrollbar"
value="yes"
checked={showScrollbar}
onChange={handleChange}
disabled={saving}
className="peer sr-only"
/>
<div className="pointer-events-none peer h-6 w-11 rounded-full bg-stone-400 after:absolute after:left-[2px] after:top-[2px] after:h-5 after:w-5 after:rounded-full after:shadow-xl after:border after:border-gray-600 after:bg-white after:box-shadow-md after:transition-all after:content-[''] peer-checked:bg-lime-300 peer-checked:after:translate-x-full peer-checked:after:border-white peer-focus:outline-none peer-focus:ring-4 peer-focus:ring-blue-800"></div>
</label>
</div>
</div>
</div>
);
}

View File

@ -8,6 +8,7 @@ import { useTranslation } from "react-i18next";
import CustomAppName from "./CustomAppName";
import LanguagePreference from "./LanguagePreference";
import CustomSiteSettings from "./CustomSiteSettings";
import ShowScrollbar from "./ShowScrollbar";
export default function Appearance() {
const { t } = useTranslation();
@ -30,6 +31,7 @@ export default function Appearance() {
</p>
</div>
<LanguagePreference />
<ShowScrollbar />
<CustomLogo />
<CustomAppName />
<CustomMessages />

View File

@ -59,7 +59,7 @@ export default function WorkspaceChats() {
const { t } = useTranslation();
const handleDumpChats = async (exportType) => {
const chats = await System.exportChats(exportType);
const chats = await System.exportChats(exportType, "workspace");
if (!!chats) {
const { name, mimeType, fileExtension, filenameFunc } =
exportOptions[exportType];

View File

@ -1,4 +1,4 @@
import { useEffect, useState } from "react";
import { useEffect, useState, useRef } from "react";
import Sidebar from "@/components/SettingsSidebar";
import { isMobile } from "react-device-detect";
import * as Skeleton from "react-loading-skeleton";
@ -7,10 +7,86 @@ import useQuery from "@/hooks/useQuery";
import ChatRow from "./ChatRow";
import Embed from "@/models/embed";
import { useTranslation } from "react-i18next";
import { CaretDown, Download } from "@phosphor-icons/react";
import showToast from "@/utils/toast";
import { saveAs } from "file-saver";
import System from "@/models/system";
const exportOptions = {
csv: {
name: "CSV",
mimeType: "text/csv",
fileExtension: "csv",
filenameFunc: () => {
return `anythingllm-embed-chats-${new Date().toLocaleDateString()}`;
},
},
json: {
name: "JSON",
mimeType: "application/json",
fileExtension: "json",
filenameFunc: () => {
return `anythingllm-embed-chats-${new Date().toLocaleDateString()}`;
},
},
jsonl: {
name: "JSONL",
mimeType: "application/jsonl",
fileExtension: "jsonl",
filenameFunc: () => {
return `anythingllm-embed-chats-${new Date().toLocaleDateString()}-lines`;
},
},
jsonAlpaca: {
name: "JSON (Alpaca)",
mimeType: "application/json",
fileExtension: "json",
filenameFunc: () => {
return `anythingllm-embed-chats-${new Date().toLocaleDateString()}-alpaca`;
},
},
};
export default function EmbedChats() {
// TODO [FEAT]: Add export of embed chats
const [showMenu, setShowMenu] = useState(false);
const menuRef = useRef();
const openMenuButton = useRef();
const { t } = useTranslation();
const handleDumpChats = async (exportType) => {
const chats = await System.exportChats(exportType, "embed");
if (!!chats) {
const { name, mimeType, fileExtension, filenameFunc } =
exportOptions[exportType];
const blob = new Blob([chats], { type: mimeType });
saveAs(blob, `${filenameFunc()}.${fileExtension}`);
showToast(`Embed chats exported successfully as ${name}.`, "success");
} else {
showToast("Failed to export embed chats.", "error");
}
};
const toggleMenu = () => {
setShowMenu(!showMenu);
};
useEffect(() => {
function handleClickOutside(event) {
if (
menuRef.current &&
!menuRef.current.contains(event.target) &&
!openMenuButton.current.contains(event.target)
) {
setShowMenu(false);
}
}
document.addEventListener("mousedown", handleClickOutside);
return () => {
document.removeEventListener("mousedown", handleClickOutside);
};
}, []);
return (
<div className="w-screen h-screen overflow-hidden bg-sidebar flex">
<Sidebar />
@ -24,6 +100,38 @@ export default function EmbedChats() {
<p className="text-lg leading-6 font-bold text-white">
{t("embed-chats.title")}
</p>
<div className="relative">
<button
ref={openMenuButton}
onClick={toggleMenu}
className="flex items-center gap-x-2 px-4 py-1 rounded-lg bg-primary-button hover:text-white text-xs font-semibold hover:bg-secondary shadow-[0_4px_14px_rgba(0,0,0,0.25)] h-[34px] w-fit"
>
<Download size={18} weight="bold" />
{t("embed-chats.export")}
<CaretDown size={18} weight="bold" />
</button>
<div
ref={menuRef}
className={`${
showMenu ? "slide-down" : "slide-up hidden"
} z-20 w-fit rounded-lg absolute top-full right-0 bg-secondary mt-2 shadow-md`}
>
<div className="py-2">
{Object.entries(exportOptions).map(([key, data]) => (
<button
key={key}
onClick={() => {
handleDumpChats(key);
setShowMenu(false);
}}
className="w-full text-left px-4 py-2 text-white text-sm hover:bg-[#3D4147]"
>
{data.name}
</button>
))}
</div>
</div>
</div>
</div>
<p className="text-xs leading-[18px] font-base text-white text-opacity-60">
{t("embed-chats.description")}

View File

@ -13,6 +13,7 @@ import OllamaLogo from "@/media/llmprovider/ollama.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
import FireworksAILogo from "@/media/llmprovider/fireworksai.jpeg";
import MistralLogo from "@/media/llmprovider/mistral.jpeg";
import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
import PerplexityLogo from "@/media/llmprovider/perplexity.png";
@ -34,6 +35,7 @@ import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
import FireworksAiOptions from "@/components/LLMSelection/FireworksAiOptions";
import MistralOptions from "@/components/LLMSelection/MistralOptions";
import HuggingFaceOptions from "@/components/LLMSelection/HuggingFaceOptions";
import PerplexityOptions from "@/components/LLMSelection/PerplexityOptions";
@ -128,6 +130,15 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "Run open source models from Together AI.",
requiredConfig: ["TogetherAiApiKey"],
},
{
name: "Fireworks AI",
value: "fireworksai",
logo: FireworksAILogo,
options: (settings) => <FireworksAiOptions settings={settings} />,
description:
"The fastest and most efficient inference engine to build production-ready, compound AI systems.",
requiredConfig: ["FireworksAiLLMApiKey"],
},
{
name: "Mistral",
value: "mistral",

View File

@ -8,6 +8,7 @@ import AnthropicLogo from "@/media/llmprovider/anthropic.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
import FireworksAILogo from "@/media/llmprovider/fireworksai.jpeg";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import MistralLogo from "@/media/llmprovider/mistral.jpeg";
@ -107,6 +108,14 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: TogetherAILogo,
},
fireworksai: {
name: "FireworksAI",
description: [
"Your chats will not be used for training",
"Your prompts and document text used in response creation are visible to Fireworks AI",
],
logo: FireworksAILogo,
},
mistral: {
name: "Mistral",
description: [

View File

@ -9,6 +9,7 @@ import OllamaLogo from "@/media/llmprovider/ollama.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
import FireworksAILogo from "@/media/llmprovider/fireworksai.jpeg";
import MistralLogo from "@/media/llmprovider/mistral.jpeg";
import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
import PerplexityLogo from "@/media/llmprovider/perplexity.png";
@ -31,6 +32,7 @@ import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
import MistralOptions from "@/components/LLMSelection/MistralOptions";
import HuggingFaceOptions from "@/components/LLMSelection/HuggingFaceOptions";
import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
import FireworksAiOptions from "@/components/LLMSelection/FireworksAiOptions";
import PerplexityOptions from "@/components/LLMSelection/PerplexityOptions";
import OpenRouterOptions from "@/components/LLMSelection/OpenRouterOptions";
import GroqAiOptions from "@/components/LLMSelection/GroqAiOptions";
@ -130,6 +132,14 @@ const LLMS = [
options: (settings) => <TogetherAiOptions settings={settings} />,
description: "Run open source models from Together AI.",
},
{
name: "Fireworks AI",
value: "fireworksai",
logo: FireworksAILogo,
options: (settings) => <FireworksAiOptions settings={settings} />,
description:
"The fastest and most efficient inference engine to build production-ready, compound AI systems.",
},
{
name: "Mistral",
value: "mistral",

View File

@ -22,6 +22,7 @@ const ENABLED_PROVIDERS = [
"textgenwebui",
"generic-openai",
"bedrock",
"fireworksai",
// TODO: More agent support.
// "cohere", // Has tool calling and will need to build explicit support
// "huggingface" // Can be done but already has issues with no-chat templated. Needs to be tested.

View File

@ -9,6 +9,7 @@ export const SEEN_WATCH_ALERT = "anythingllm_watched_document_alert";
export const USER_BACKGROUND_COLOR = "bg-historical-msg-user";
export const AI_BACKGROUND_COLOR = "bg-historical-msg-system";
export const APPEARANCE_SETTINGS = "anythingllm_appearance_settings";
export const OLLAMA_COMMON_URLS = [
"http://127.0.0.1:11434",

View File

@ -82,6 +82,7 @@ AnythingLLMのいくつかのクールな機能
- [LM Studio (すべてのモデル)](https://lmstudio.ai)
- [LocalAi (すべてのモデル)](https://localai.io/)
- [Together AI (チャットモデル)](https://www.together.ai/)
- [Fireworks AI (チャットモデル)](https://fireworks.ai/)
- [Perplexity (チャットモデル)](https://www.perplexity.ai/)
- [OpenRouter (チャットモデル)](https://openrouter.ai/)
- [Mistral](https://mistral.ai/)

View File

@ -78,6 +78,7 @@ AnythingLLM的一些酷炫特性
- [LM Studio (所有模型)](https://lmstudio.ai)
- [LocalAi (所有模型)](https://localai.io/)
- [Together AI (聊天模型)](https://www.together.ai/)
- [Fireworks AI (聊天模型)](https://fireworks.ai/)
- [Perplexity (聊天模型)](https://www.perplexity.ai/)
- [OpenRouter (聊天模型)](https://openrouter.ai/)
- [Mistral](https://mistral.ai/)

View File

@ -44,6 +44,10 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
# TOGETHER_AI_API_KEY='my-together-ai-key'
# TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'
# LLM_PROVIDER='fireworksai'
# FIREWORKS_AI_LLM_API_KEY='my-fireworks-ai-key'
# FIREWORKS_AI_LLM_MODEL_PREF='accounts/fireworks/models/llama-v3p1-8b-instruct'
# LLM_PROVIDER='perplexity'
# PERPLEXITY_API_KEY='my-perplexity-key'
# PERPLEXITY_MODEL_PREF='codellama-34b-instruct'

View File

@ -546,7 +546,14 @@ function apiWorkspaceEndpoints(app) {
example: {
message: "What is AnythingLLM?",
mode: "query | chat",
sessionId: "identifier-to-partition-chats-by-external-id"
sessionId: "identifier-to-partition-chats-by-external-id",
attachments: [
{
name: "image.png",
mime: "image/png",
contentString: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA..."
}
]
}
}
}
@ -576,7 +583,12 @@ function apiWorkspaceEndpoints(app) {
*/
try {
const { slug } = request.params;
const { message, mode = "query", sessionId = null } = reqBody(request);
const {
message,
mode = "query",
sessionId = null,
attachments = [],
} = reqBody(request);
const workspace = await Workspace.get({ slug: String(slug) });
if (!workspace) {
@ -612,6 +624,7 @@ function apiWorkspaceEndpoints(app) {
user: null,
thread: null,
sessionId: !!sessionId ? String(sessionId) : null,
attachments,
});
await Telemetry.sendTelemetry("sent_chat", {
@ -655,7 +668,14 @@ function apiWorkspaceEndpoints(app) {
example: {
message: "What is AnythingLLM?",
mode: "query | chat",
sessionId: "identifier-to-partition-chats-by-external-id"
sessionId: "identifier-to-partition-chats-by-external-id",
attachments: [
{
name: "image.png",
mime: "image/png",
contentString: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA..."
}
]
}
}
}
@ -706,7 +726,12 @@ function apiWorkspaceEndpoints(app) {
*/
try {
const { slug } = request.params;
const { message, mode = "query", sessionId = null } = reqBody(request);
const {
message,
mode = "query",
sessionId = null,
attachments = [],
} = reqBody(request);
const workspace = await Workspace.get({ slug: String(slug) });
if (!workspace) {
@ -749,6 +774,7 @@ function apiWorkspaceEndpoints(app) {
user: null,
thread: null,
sessionId: !!sessionId ? String(sessionId) : null,
attachments,
});
await Telemetry.sendTelemetry("sent_chat", {
LLMSelection:

View File

@ -44,10 +44,7 @@ const {
isMultiUserSetup,
} = require("../utils/middleware/multiUserProtected");
const { fetchPfp, determinePfpFilepath } = require("../utils/files/pfp");
const {
prepareWorkspaceChatsForExport,
exportChatsAsType,
} = require("../utils/helpers/chat/convertTo");
const { exportChatsAsType } = require("../utils/helpers/chat/convertTo");
const { EventLogs } = require("../models/eventLogs");
const { CollectorApi } = require("../utils/collectorApi");
const {
@ -1014,13 +1011,13 @@ function systemEndpoints(app) {
[validatedRequest, flexUserRoleValid([ROLES.manager, ROLES.admin])],
async (request, response) => {
try {
const { type = "jsonl" } = request.query;
const chats = await prepareWorkspaceChatsForExport(type);
const { contentType, data } = await exportChatsAsType(chats, type);
const { type = "jsonl", chatType = "workspace" } = request.query;
const { contentType, data } = await exportChatsAsType(type, chatType);
await EventLogs.logEvent(
"exported_chats",
{
type,
chatType,
},
response.locals.user?.id
);

View File

@ -38,6 +38,13 @@ const DocumentSyncQueue = {
return new Date(Number(new Date()) + queueRecord.staleAfterMs);
},
/**
* Check if the document can be watched based on the metadata fields
* @param {object} metadata - metadata to check
* @param {string} metadata.title - title of the document
* @param {string} metadata.chunkSource - chunk source of the document
* @returns {boolean} - true if the document can be watched, false otherwise
*/
canWatch: function ({ title, chunkSource = null } = {}) {
if (chunkSource.startsWith("link://") && title.endsWith(".html"))
return true; // If is web-link material (prior to feature most chunkSources were links://)

View File

@ -57,26 +57,12 @@ const Document = {
}
},
getOnlyWorkspaceIds: async function (clause = {}) {
try {
const workspaceIds = await prisma.workspace_documents.findMany({
where: clause,
select: {
workspaceId: true,
},
});
return workspaceIds.map((record) => record.workspaceId) || [];
} catch (error) {
console.error(error.message);
return [];
}
},
where: async function (
clause = {},
limit = null,
orderBy = null,
include = null
include = null,
select = null
) {
try {
const results = await prisma.workspace_documents.findMany({
@ -84,6 +70,7 @@ const Document = {
...(limit !== null ? { take: limit } : {}),
...(orderBy !== null ? { orderBy } : {}),
...(include !== null ? { include } : {}),
...(select !== null ? { select: { ...select } } : {}),
});
return results;
} catch (error) {

View File

@ -446,6 +446,10 @@ const SystemSettings = {
TogetherAiApiKey: !!process.env.TOGETHER_AI_API_KEY,
TogetherAiModelPref: process.env.TOGETHER_AI_MODEL_PREF,
// Fireworks AI API Keys
FireworksAiLLMApiKey: !!process.env.FIREWORKS_AI_LLM_API_KEY,
FireworksAiLLMModelPref: process.env.FIREWORKS_AI_LLM_MODEL_PREF,
// Perplexity AI Keys
PerplexityApiKey: !!process.env.PERPLEXITY_API_KEY,
PerplexityModelPref: process.env.PERPLEXITY_MODEL_PREF,

View File

@ -1958,7 +1958,14 @@
"example": {
"message": "What is AnythingLLM?",
"mode": "query | chat",
"sessionId": "identifier-to-partition-chats-by-external-id"
"sessionId": "identifier-to-partition-chats-by-external-id",
"attachments": [
{
"name": "image.png",
"mime": "image/png",
"contentString": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA..."
}
]
}
}
}
@ -2053,7 +2060,14 @@
"example": {
"message": "What is AnythingLLM?",
"mode": "query | chat",
"sessionId": "identifier-to-partition-chats-by-external-id"
"sessionId": "identifier-to-partition-chats-by-external-id",
"attachments": [
{
"name": "image.png",
"mime": "image/png",
"contentString": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA..."
}
]
}
}
}

View File

@ -0,0 +1,135 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
function fireworksAiModels() {
const { MODELS } = require("./models.js");
return MODELS || {};
}
class FireworksAiLLM {
constructor(embedder = null, modelPreference = null) {
if (!process.env.FIREWORKS_AI_LLM_API_KEY)
throw new Error("No FireworksAI API key was set.");
const { OpenAI: OpenAIApi } = require("openai");
this.openai = new OpenAIApi({
baseURL: "https://api.fireworks.ai/inference/v1",
apiKey: process.env.FIREWORKS_AI_LLM_API_KEY ?? null,
});
this.model = modelPreference || process.env.FIREWORKS_AI_LLM_MODEL_PREF;
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
this.embedder = !embedder ? new NativeEmbedder() : embedder;
this.defaultTemp = 0.7;
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
allModelInformation() {
return fireworksAiModels();
}
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
static promptWindowLimit(modelName) {
const availableModels = fireworksAiModels();
return availableModels[modelName]?.maxLength || 4096;
}
// Ensure the user set a value for the token limit
// and if undefined - assume 4096 window.
promptWindowLimit() {
const availableModels = this.allModelInformation();
return availableModels[this.model]?.maxLength || 4096;
}
async isValidChatCompletionModel(model = "") {
const availableModels = this.allModelInformation();
return availableModels.hasOwnProperty(model);
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`FireworksAI chat: ${this.model} is not valid for chat completion!`
);
const result = await this.openai.chat.completions.create({
model: this.model,
messages,
temperature,
});
if (!result.hasOwnProperty("choices") || result.choices.length === 0)
return null;
return result.choices[0].message.content;
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`FireworksAI chat: ${this.model} is not valid for chat completion!`
);
const streamRequest = await this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
});
return streamRequest;
}
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
module.exports = {
FireworksAiLLM,
fireworksAiModels,
};
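
A minimal usage sketch, assuming FIREWORKS_AI_LLM_API_KEY is set and the default NativeEmbedder is acceptable (the require path is an assumption):

const { FireworksAiLLM } = require("../AiProviders/fireworksAi"); // path assumed
const llm = new FireworksAiLLM(null, "accounts/fireworks/models/llama-v3p1-8b-instruct");
const messages = llm.constructPrompt({
  systemPrompt: "You are a helpful assistant.",
  contextTexts: [],
  chatHistory: [],
  userPrompt: "What is AnythingLLM?",
});
const reply = await llm.getChatCompletion(messages, { temperature: llm.defaultTemp });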

View File

@ -0,0 +1,112 @@
const MODELS = {
"accounts/fireworks/models/llama-v3p1-405b-instruct": {
id: "accounts/fireworks/models/llama-v3p1-405b-instruct",
organization: "Meta",
name: "Llama 3.1 405B Instruct",
maxLength: 131072,
},
"accounts/fireworks/models/llama-v3p1-70b-instruct": {
id: "accounts/fireworks/models/llama-v3p1-70b-instruct",
organization: "Meta",
name: "Llama 3.1 70B Instruct",
maxLength: 131072,
},
"accounts/fireworks/models/llama-v3p1-8b-instruct": {
id: "accounts/fireworks/models/llama-v3p1-8b-instruct",
organization: "Meta",
name: "Llama 3.1 8B Instruct",
maxLength: 131072,
},
"accounts/fireworks/models/llama-v3-70b-instruct": {
id: "accounts/fireworks/models/llama-v3-70b-instruct",
organization: "Meta",
name: "Llama 3 70B Instruct",
maxLength: 8192,
},
"accounts/fireworks/models/mixtral-8x22b-instruct": {
id: "accounts/fireworks/models/mixtral-8x22b-instruct",
organization: "mistralai",
name: "Mixtral MoE 8x22B Instruct",
maxLength: 65536,
},
"accounts/fireworks/models/mixtral-8x7b-instruct": {
id: "accounts/fireworks/models/mixtral-8x7b-instruct",
organization: "mistralai",
name: "Mixtral MoE 8x7B Instruct",
maxLength: 32768,
},
"accounts/fireworks/models/firefunction-v2": {
id: "accounts/fireworks/models/firefunction-v2",
organization: "Fireworks AI",
name: "Firefunction V2",
maxLength: 8192,
},
"accounts/fireworks/models/firefunction-v1": {
id: "accounts/fireworks/models/firefunction-v1",
organization: "Fireworks AI",
name: "FireFunction V1",
maxLength: 32768,
},
"accounts/fireworks/models/gemma2-9b-it": {
id: "accounts/fireworks/models/gemma2-9b-it",
organization: "Google",
name: "Gemma 2 9B Instruct",
maxLength: 8192,
},
"accounts/fireworks/models/llama-v3-70b-instruct-hf": {
id: "accounts/fireworks/models/llama-v3-70b-instruct-hf",
organization: "Hugging Face",
name: "Llama 3 70B Instruct (HF version)",
maxLength: 8192,
},
"accounts/fireworks/models/llama-v3-8b-instruct": {
id: "accounts/fireworks/models/llama-v3-8b-instruct",
organization: "Hugging Face",
name: "Llama 3 8B Instruct",
maxLength: 8192,
},
"accounts/fireworks/models/llama-v3-8b-instruct-hf": {
id: "accounts/fireworks/models/llama-v3-8b-instruct-hf",
organization: "Hugging Face",
name: "Llama 3 8B Instruct (HF version)",
maxLength: 8192,
},
"accounts/fireworks/models/mixtral-8x7b-instruct-hf": {
id: "accounts/fireworks/models/mixtral-8x7b-instruct-hf",
organization: "Hugging Face",
name: "Mixtral MoE 8x7B Instruct (HF version)",
maxLength: 32768,
},
"accounts/fireworks/models/mythomax-l2-13b": {
id: "accounts/fireworks/models/mythomax-l2-13b",
organization: "Gryphe",
name: "MythoMax L2 13b",
maxLength: 4096,
},
"accounts/fireworks/models/phi-3-vision-128k-instruct": {
id: "accounts/fireworks/models/phi-3-vision-128k-instruct",
organization: "Microsoft",
name: "Phi 3.5 Vision Instruct",
maxLength: 8192,
},
"accounts/fireworks/models/starcoder-16b": {
id: "accounts/fireworks/models/starcoder-16b",
organization: "BigCode",
name: "StarCoder 15.5B",
maxLength: 8192,
},
"accounts/fireworks/models/starcoder-7b": {
id: "accounts/fireworks/models/starcoder-7b",
organization: "BigCode",
name: "StarCoder 7B",
maxLength: 8192,
},
"accounts/fireworks/models/yi-01-ai/yi-large": {
id: "accounts/fireworks/models/yi-01-ai/yi-large",
organization: "01.AI",
name: "Yi-Large",
maxLength: 32768,
},
};
module.exports.MODELS = MODELS;

View File

@ -0,0 +1 @@
*.json

View File

@ -0,0 +1,20 @@
| Organization | Model Name | Model String for API | Context length |
|--------------|------------|----------------------|----------------|
| Meta | Llama 3.1 405B Instruct | accounts/fireworks/models/llama-v3p1-405b-instruct | 131072 |
| Meta | Llama 3.1 70B Instruct | accounts/fireworks/models/llama-v3p1-70b-instruct | 131072 |
| Meta | Llama 3.1 8B Instruct | accounts/fireworks/models/llama-v3p1-8b-instruct | 131072 |
| Meta | Llama 3 70B Instruct | accounts/fireworks/models/llama-v3-70b-instruct | 8192 |
| mistralai | Mixtral MoE 8x22B Instruct | accounts/fireworks/models/mixtral-8x22b-instruct | 65536 |
| mistralai | Mixtral MoE 8x7B Instruct | accounts/fireworks/models/mixtral-8x7b-instruct | 32768 |
| Fireworks AI | Firefunction V2 | accounts/fireworks/models/firefunction-v2 | 8192 |
| Fireworks AI | FireFunction V1 | accounts/fireworks/models/firefunction-v1 | 32768 |
| Google | Gemma 2 9B Instruct | accounts/fireworks/models/gemma2-9b-it | 8192 |
| Hugging Face | Llama 3 70B Instruct (HF version) | accounts/fireworks/models/llama-v3-70b-instruct-hf | 8192 |
| Hugging Face | Llama 3 8B Instruct | accounts/fireworks/models/llama-v3-8b-instruct | 8192 |
| Hugging Face | Llama 3 8B Instruct (HF version) | accounts/fireworks/models/llama-v3-8b-instruct-hf | 8192 |
| Hugging Face | Mixtral MoE 8x7B Instruct (HF version) | accounts/fireworks/models/mixtral-8x7b-instruct-hf | 32768 |
| Gryphe | MythoMax L2 13b | accounts/fireworks/models/mythomax-l2-13b | 4096 |
| Microsoft | Phi 3.5 Vision Instruct | accounts/fireworks/models/phi-3-vision-128k-instruct | 8192 |
| BigCode | StarCoder 15.5B | accounts/fireworks/models/starcoder-16b | 8192 |
| BigCode | StarCoder 7B | accounts/fireworks/models/starcoder-7b | 8192 |
| 01.AI | Yi-Large | accounts/fireworks/models/yi-01-ai/yi-large | 32768 |

View File

@ -0,0 +1,46 @@
// Fireworks AI does not provide a simple REST API to list models,
// so we maintain a table copied from their documentation at
// https://fireworks.ai/models, which we then parse into a format that makes sense.
// To run: cd into this directory and run `node parse.mjs`, then copy the
// output into the export in ../models.js.
// Update the date below whenever you re-run this, since Fireworks AI adds new models.
// Last Collected: Sep 15, 2024
// NOTE: Only 18 of the ~100 chat language models listed at
// https://fireworks.ai/models have been collected so far. If you want to add
// models, please add them manually to chat_models.txt; scraping the full list
// programmatically has not worked out.
import fs from "fs";
function parseChatModels() {
const fixed = {};
const tableString = fs.readFileSync("chat_models.txt", { encoding: "utf-8" });
const rows = tableString.split("\n").slice(2);
rows.forEach((row) => {
const [provider, name, id, maxLength] = row.split("|").slice(1, -1);
const data = {
provider: provider.trim(),
name: name.trim(),
id: id.trim(),
maxLength: Number(maxLength.trim()),
};
fixed[data.id] = {
id: data.id,
organization: data.provider,
name: data.name,
maxLength: data.maxLength,
};
});
fs.writeFileSync("chat_models.json", JSON.stringify(fixed, null, 2), "utf-8");
return fixed;
}
parseChatModels();
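
Given the table format above, each parsed row lands in chat_models.json keyed by its model string, e.g. for the first row:

{
  "accounts/fireworks/models/llama-v3p1-405b-instruct": {
    "id": "accounts/fireworks/models/llama-v3p1-405b-instruct",
    "organization": "Meta",
    "name": "Llama 3.1 405B Instruct",
    "maxLength": 131072
  }
}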

View File

@ -53,17 +53,48 @@ class MistralLLM {
return true;
}
/**
* Generates the appropriate content array for a message + attachments.
* @param {{userPrompt: string, attachments: import("../../helpers").Attachment[]}} param0
* @returns {string|object[]}
*/
#generateContent({ userPrompt, attachments = [] }) {
if (!attachments.length) return userPrompt;
const content = [{ type: "text", text: userPrompt }];
for (let attachment of attachments) {
content.push({
type: "image_url",
image_url: attachment.contentString,
});
}
return content.flat();
}
/**
* Construct the user prompt for this model.
* @param {{attachments: import("../../helpers").Attachment[]}} param0
* @returns {object[]} the formatted messages array for the chat completion request
*/
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [], // This is the specific attachment for only this prompt
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
return [
prompt,
...chatHistory,
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
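
With one image attachment, the final user message produced by constructPrompt() mixes text and image parts, e.g. (contentString truncated):

{
  role: "user",
  content: [
    { type: "text", text: "What is in this image?" },
    {
      type: "image_url",
      image_url: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA...",
    },
  ],
}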

View File

@ -781,6 +781,8 @@ ${this.getHistory({ to: route.to })
return new Providers.TextWebGenUiProvider({});
case "bedrock":
return new Providers.AWSBedrockProvider({});
case "fireworksai":
return new Providers.FireworksAIProvider({ model: config.model });
default:
throw new Error(

View File

@ -125,6 +125,11 @@ class Provider {
},
...config,
});
case "fireworksai":
return new ChatOpenAI({
apiKey: process.env.FIREWORKS_AI_LLM_API_KEY,
...config,
});
// OSS Model Runners
// case "anythingllm_ollama":

View File

@ -0,0 +1,118 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
/**
* The agent provider for the FireworksAI provider.
* We wrap FireworksAI in UnTooled because tool-calling may not be supported for specific models, and UnTooled normalizes that behavior.
*/
class FireworksAIProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(config = {}) {
const { model = "accounts/fireworks/models/llama-v3p1-8b-instruct" } =
config;
super();
const client = new OpenAI({
baseURL: "https://api.fireworks.ai/inference/v1",
apiKey: process.env.FIREWORKS_AI_LLM_API_KEY,
maxRetries: 0,
});
this._client = client;
this.model = model;
this.verbose = true;
}
get client() {
return this._client;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client.chat.completions
.create({
model: this.model,
temperature: 0,
messages,
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("FireworksAI chat: No results!");
if (result.choices.length === 0)
throw new Error("FireworksAI chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
return null;
});
}
/**
* Create a completion based on the received messages.
*
* @param messages A list of messages to send to the API.
* @param functions An optional list of function definitions the model may call.
* @returns The completion.
*/
async complete(messages, functions = null) {
try {
let completion;
if (functions?.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
// The Deduplicator inherited from the UnTooled class is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange,
// _but_ we should still allow it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
/**
* Get the cost of the completion.
*
* @param _usage The usage metrics of the completion (unused; FireworksAI cost is reported as 0).
* @returns The cost of the completion.
*/
getCost(_usage) {
return 0;
}
}
module.exports = FireworksAIProvider;
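
A hedged usage sketch; the function definition follows the { name, description, parameters } shape used by aibitat plugins, and the tool itself is hypothetical:

const provider = new FireworksAIProvider({}); // requires FIREWORKS_AI_LLM_API_KEY
const { result, functionCall } = await provider.complete(
  [{ role: "user", content: "What is the weather in Tokyo?" }],
  [
    {
      name: "get-weather", // hypothetical tool
      description: "Fetch the current weather for a city.",
      parameters: {
        type: "object",
        properties: { city: { type: "string" } },
      },
    },
  ]
);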

View File

@ -13,6 +13,7 @@ const GenericOpenAiProvider = require("./genericOpenAi.js");
const PerplexityProvider = require("./perplexity.js");
const TextWebGenUiProvider = require("./textgenwebui.js");
const AWSBedrockProvider = require("./bedrock.js");
const FireworksAIProvider = require("./fireworksai.js");
module.exports = {
OpenAIProvider,
@ -30,4 +31,5 @@ module.exports = {
PerplexityProvider,
TextWebGenUiProvider,
AWSBedrockProvider,
FireworksAIProvider,
};

View File

@ -1,5 +1,6 @@
const AIbitat = require("./aibitat");
const AgentPlugins = require("./aibitat/plugins");
const ImportedPlugin = require("./imported");
const { httpSocket } = require("./aibitat/plugins/http-socket.js");
const { WorkspaceChats } = require("../../models/workspaceChats");
const { safeJsonParse } = require("../http");
@ -160,6 +161,27 @@ class EphemeralAgentHandler extends AgentHandler {
continue;
}
// Load an imported plugin. Imported plugins are marked by a `@@` prefix in the
// array of functions to load, where the rest of the name is the plugin's @@hubID.
if (name.startsWith("@@")) {
const hubId = name.replace("@@", "");
const valid = ImportedPlugin.validateImportedPluginHandler(hubId);
if (!valid) {
this.log(
`Imported plugin by hubId ${hubId} not found in plugin directory. Skipping inclusion to agent cluster.`
);
continue;
}
const plugin = ImportedPlugin.loadPluginByHubId(hubId);
const callOpts = plugin.parseCallOptions();
this.aibitat.use(plugin.plugin(callOpts));
this.log(
`Attached ${plugin.name} (${hubId}) imported plugin to Agent cluster`
);
continue;
}
// Load single-stage plugin.
if (!AgentPlugins.hasOwnProperty(name)) {
this.log(
@ -192,6 +214,7 @@ class EphemeralAgentHandler extends AgentHandler {
AgentPlugins.docSummarizer.name,
AgentPlugins.webScraping.name,
...(await agentSkillsFromSystemSettings()),
...(await ImportedPlugin.activeImportedPlugins()),
];
}
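
An agent's function list can now mix built-in plugin names with imported hub plugin IDs, e.g. (the hubId here is hypothetical):

const functions = [
  AgentPlugins.docSummarizer.name,
  AgentPlugins.webScraping.name,
  "@@dnd-dice-roller", // hypothetical imported plugin, resolved by hubId
];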

View File

@ -0,0 +1,158 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "AnythingLLM Agent Skill Plugin Manifest Schema",
"type": "object",
"properties": {
"active": {
"type": "boolean",
"description": "Determines if the custom agent skill is active."
},
"hubId": {
"type": "string",
"description": "Used to identify the custom agent skill. Must be the same as the parent folder name."
},
"name": {
"type": "string",
"description": "The human-readable name of the skill displayed in the AnythingLLM UI."
},
"schema": {
"type": "string",
"enum": ["skill-1.0.0"],
"description": "Must be 'skill-1.0.0'. May be updated on manifest spec changes."
},
"version": {
"type": "string",
"description": "Version of the custom agent skill, defined by the user."
},
"description": {
"type": "string",
"description": "Short description of the custom agent skill."
},
"author": {
"type": "string",
"description": "Author tag of the custom agent skill."
},
"author_url": {
"type": "string",
"format": "uri",
"description": "URL of the author of the custom agent skill."
},
"license": {
"type": "string",
"description": "License of the custom agent skill."
},
"setup_args": {
"type": "object",
"additionalProperties": {
"type": "object",
"properties": {
"type": {
"type": "string",
"description": "Type of value expected."
},
"required": {
"type": "boolean",
"description": "Indicates if the argument is required."
},
"input": {
"type": "object",
"properties": {
"type": {
"type": "string",
"description": "Type of input to be rendered."
},
"default": {
"type": "string",
"description": "Default value of the input."
},
"placeholder": {
"type": "string",
"description": "Placeholder text for the input."
},
"hint": {
"type": "string",
"description": "Hint text for the input."
}
},
"required": ["type"],
"additionalProperties": false
},
"value": {
"type": "string",
"description": "Preset value of the argument."
}
},
"required": ["type"],
"additionalProperties": false
},
"description": "Setup arguments used to configure the custom agent skill from the UI and make runtime arguments accessible in the handler.js file when the skill is called."
},
"examples": {
"type": "array",
"items": {
"type": "object",
"properties": {
"prompt": {
"type": "string",
"description": "Example prompt for the custom agent skill."
},
"call": {
"type": "string",
"description": "Expected invocation format matching the input format of the custom agent skill."
}
},
"required": ["prompt", "call"],
"additionalProperties": false
},
"description": "Array of examples used to pre-inject examples into the custom agent skill."
},
"entrypoint": {
"type": "object",
"properties": {
"file": {
"type": "string",
"description": "Location of the file to be executed relative to the plugin.json file."
},
"params": {
"type": "object",
"additionalProperties": {
"type": "object",
"properties": {
"description": {
"type": "string",
"description": "Short description of the parameter's purpose."
},
"type": {
"type": "string",
"enum": ["string", "number", "boolean"],
"description": "Type of the parameter."
}
},
"required": ["description", "type"],
"additionalProperties": false
},
"description": "Parameters expected by the custom agent skill."
}
},
"required": ["file", "params"],
"additionalProperties": false,
"description": "Defines the entrypoint of the custom agent skill and the expected inputs."
},
"imported": {
"type": "boolean",
"enum": [true],
"description": "Must be set to true."
}
},
"required": [
"active",
"hubId",
"name",
"schema",
"version",
"description",
"entrypoint",
"imported"
],
"additionalProperties": true
}
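
A minimal manifest that satisfies the schema's required fields (all values hypothetical):

{
  "active": true,
  "hubId": "my-custom-skill",
  "name": "My Custom Skill",
  "schema": "skill-1.0.0",
  "version": "1.0.0",
  "description": "A hypothetical example skill.",
  "entrypoint": {
    "file": "handler.js",
    "params": {
      "query": {
        "description": "The search query to run.",
        "type": "string"
      }
    }
  },
  "imported": true
}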

View File

@ -16,6 +16,7 @@ class AgentHandler {
lmstudio: "LMSTUDIO_MODEL_PREF",
textgenwebui: null, // does not even use `model` in API req
"generic-openai": "GENERIC_OPEN_AI_MODEL_PREF",
bedrock: "AWS_BEDROCK_LLM_MODEL_PREFERENCE",
};
invocation = null;
aibitat = null;
@ -149,11 +150,16 @@ class AgentHandler {
if (
!process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID ||
!process.env.AWS_BEDROCK_LLM_ACCESS_KEY ||
!process.env.AWS_BEDROCK_LLM_REGION ||
!process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE
!process.env.AWS_BEDROCK_LLM_REGION
)
throw new Error(
"AWS Bedrock Access Keys, model and region must be provided to use agents."
"AWS Bedrock Access Keys and region must be provided to use agents."
);
break;
case "fireworksai":
if (!process.env.FIREWORKS_AI_LLM_API_KEY)
throw new Error(
"FireworksAI API Key must be provided to use agents."
);
break;
@ -164,8 +170,8 @@ class AgentHandler {
}
}
providerDefault() {
switch (this.provider) {
providerDefault(provider = this.provider) {
switch (provider) {
case "openai":
return "gpt-4o";
case "anthropic":
@ -198,11 +204,39 @@ class AgentHandler {
return null;
case "bedrock":
return null;
case "fireworksai":
return null;
default:
return "unknown";
}
}
#getFallbackProvider() {
// First, fallback to the workspace chat provider and model if they exist
if (
this.invocation.workspace.chatProvider &&
this.invocation.workspace.chatModel
) {
return {
provider: this.invocation.workspace.chatProvider,
model: this.invocation.workspace.chatModel,
};
}
// If workspace does not have chat provider and model fallback
// to system provider and try to load provider default model
const systemProvider = process.env.LLM_PROVIDER;
const systemModel = this.providerDefault(systemProvider);
if (systemProvider && systemModel) {
return {
provider: systemProvider,
model: systemModel,
};
}
return null;
}
/**
* Finds or assumes the model preference value to use for API calls.
* If multi-model loading is supported, we use the workspace's agent model selection
@ -211,22 +245,41 @@ class AgentHandler {
* @returns {string} the model preference value to use in API calls
*/
#fetchModel() {
if (!Object.keys(this.noProviderModelDefault).includes(this.provider))
return this.invocation.workspace.agentModel || this.providerDefault();
// Provider was not explicitly set for workspace, so we are going to run our fallback logic
// that will set a provider and model for us to use.
if (!this.provider) {
const fallback = this.#getFallbackProvider();
if (!fallback) throw new Error("No valid provider found for the agent.");
this.provider = fallback.provider; // re-set the provider to the fallback provider so it is not null.
return fallback.model; // set its defined model based on fallback logic.
}
// Provider has no reliable default (cant load many models) - so we need to look at system
// for the model param.
// The provider was explicitly set, so check if the workspace has an agent model set.
if (this.invocation.workspace.agentModel) {
return this.invocation.workspace.agentModel;
}
// If the provider we are using is not supported or does not support multi-model loading
// then we use the default model for the provider.
if (!Object.keys(this.noProviderModelDefault).includes(this.provider)) {
return this.providerDefault();
}
// Load the model from the system environment variable for providers with no multi-model loading.
const sysModelKey = this.noProviderModelDefault[this.provider];
if (!!sysModelKey)
return process.env[sysModelKey] ?? this.providerDefault();
if (sysModelKey) return process.env[sysModelKey] ?? this.providerDefault();
// If all else fails - look at the provider default list
// Otherwise we have no model preference, so fall back to the provider's default model.
return this.providerDefault();
}
#providerSetupAndCheck() {
this.provider = this.invocation.workspace.agentProvider;
this.provider = this.invocation.workspace.agentProvider ?? null; // set provider to workspace agent provider if it exists
this.model = this.#fetchModel();
if (!this.provider)
throw new Error("No valid provider found for the agent.");
this.log(`Start ${this.#invocationUUID}::${this.provider}:${this.model}`);
this.checkSetup();
}
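
Condensed, the provider/model resolution order implemented above is:

// 1. workspace.agentProvider, with workspace.agentModel, an env model key,
//    or the provider default as the model.
// 2. workspace.chatProvider + workspace.chatModel (fallback when no agent provider).
// 3. process.env.LLM_PROVIDER + providerDefault(LLM_PROVIDER).
// If none of these resolve, "No valid provider found for the agent." is thrown.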

View File

@ -29,6 +29,7 @@ const { Telemetry } = require("../../models/telemetry");
* user: import("@prisma/client").users|null,
* thread: import("@prisma/client").workspace_threads|null,
* sessionId: string|null,
* attachments: { name: string; mime: string; contentString: string }[],
* }} parameters
* @returns {Promise<ResponseObject>}
*/
@ -39,6 +40,7 @@ async function chatSync({
user = null,
thread = null,
sessionId = null,
attachments = [],
}) {
const uuid = uuidv4();
const chatMode = mode ?? "chat";
@ -251,6 +253,7 @@ async function chatSync({
userPrompt: message,
contextTexts,
chatHistory,
attachments,
},
rawHistory
);
@ -301,6 +304,7 @@ async function chatSync({
* user: import("@prisma/client").users|null,
* thread: import("@prisma/client").workspace_threads|null,
* sessionId: string|null,
* attachments: { name: string; mime: string; contentString: string }[],
* }} parameters
* @returns {Promise<VoidFunction>}
*/
@ -312,6 +316,7 @@ async function streamChat({
user = null,
thread = null,
sessionId = null,
attachments = [],
}) {
const uuid = uuidv4();
const chatMode = mode ?? "chat";
@ -536,6 +541,7 @@ async function streamChat({
userPrompt: message,
contextTexts,
chatHistory,
attachments,
},
rawHistory
);
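
Callers pass attachments straight through to the LLM's constructPrompt; a sketch of the expected shape (contentString truncated):

await streamChat({
  workspace, // assumed in scope
  response, // Express response used for SSE writes
  message: "Describe this image",
  attachments: [
    {
      name: "image.png",
      mime: "image/png",
      contentString: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA...",
    },
  ],
});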

View File

@ -44,6 +44,7 @@ async function viewLocalFiles() {
items: [],
};
const subfiles = fs.readdirSync(folderPath);
const filenames = {};
for (const subfile of subfiles) {
if (path.extname(subfile) !== ".json") continue;
@ -51,30 +52,32 @@ async function viewLocalFiles() {
const rawData = fs.readFileSync(filePath, "utf8");
const cachefilename = `${file}/${subfile}`;
const { pageContent, ...metadata } = JSON.parse(rawData);
const pinnedInWorkspaces = await Document.getOnlyWorkspaceIds({
docpath: cachefilename,
pinned: true,
});
const watchedInWorkspaces = liveSyncAvailable
? await Document.getOnlyWorkspaceIds({
docpath: cachefilename,
watched: true,
})
: [];
subdocs.items.push({
name: subfile,
type: "file",
...metadata,
cached: await cachedVectorInformation(cachefilename, true),
pinnedWorkspaces: pinnedInWorkspaces,
canWatch: liveSyncAvailable
? DocumentSyncQueue.canWatch(metadata)
: false,
// Is file watched in any workspace since sync updates all workspaces where file is referenced
watched: watchedInWorkspaces.length !== 0,
// pinnedWorkspaces: [], // This is the list of workspaceIds that have pinned this document
// watched: false, // boolean to indicate if this document is watched in ANY workspace
});
filenames[cachefilename] = subfile;
}
// Grab the pinned workspaces and watched documents for this folder's documents
// at the time of the query so we don't have to re-query the database for each file
const pinnedWorkspacesByDocument =
await getPinnedWorkspacesByDocument(filenames);
const watchedDocumentsFilenames =
await getWatchedDocumentFilenames(filenames);
for (const item of subdocs.items) {
item.pinnedWorkspaces = pinnedWorkspacesByDocument[item.name] || [];
item.watched =
watchedDocumentsFilenames.hasOwnProperty(item.name) || false;
}
directory.items.push(subdocs);
}
}
@ -88,8 +91,13 @@ async function viewLocalFiles() {
return directory;
}
// Searches the vector-cache folder for existing information so we dont have to re-embed a
// document and can instead push directly to vector db.
/**
* Searches the vector-cache folder for existing information so we don't have to re-embed a
* document and can instead push directly to the vector DB.
* @param {string} filename - the filename to check for cached vector information
* @param {boolean} checkOnly - if true, only check if the file exists, do not return the cached data
* @returns {Promise<boolean|{exists: boolean, chunks: any[]}>} - resolves to a boolean when checkOnly is true, otherwise an object containing the existence of the file and its cached chunks
*/
async function cachedVectorInformation(filename = null, checkOnly = false) {
if (!filename) return checkOnly ? false : { exists: false, chunks: [] };
@ -218,6 +226,61 @@ function hasVectorCachedFiles() {
return false;
}
/**
* @param {Record<string, string>} filenames - map of docpaths to filenames to check for pinned workspaces
* @returns {Promise<Record<string, string[]>>} - a record of filenames and their corresponding workspaceIds
*/
async function getPinnedWorkspacesByDocument(filenames = []) {
return (
await Document.where(
{
docpath: {
in: Object.keys(filenames),
},
pinned: true,
},
null,
null,
null,
{
workspaceId: true,
docpath: true,
}
)
).reduce((result, { workspaceId, docpath }) => {
const filename = filenames[docpath];
if (!result[filename]) result[filename] = [];
if (!result[filename].includes(workspaceId))
result[filename].push(workspaceId);
return result;
}, {});
}
/**
* Get a record of filenames and a corresponding workspaceId that watches the document,
* used to determine whether a document should be displayed in the watched documents sidebar.
* @param {Record<string, string>} filenames - map of docpaths to filenames to check for watched workspaces
* @returns {Promise<Record<string, string>>} - a record of filenames and a corresponding watching workspaceId
*/
async function getWatchedDocumentFilenames(filenames = []) {
return (
await Document.where(
{
docpath: { in: Object.keys(filenames) },
watched: true,
},
null,
null,
null,
{ workspaceId: true, docpath: true }
)
).reduce((result, { workspaceId, docpath }) => {
const filename = filenames[docpath];
result[filename] = workspaceId;
return result;
}, {});
}
module.exports = {
findDocumentInDocuments,
cachedVectorInformation,
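
For illustration, the shapes the two helpers return (filename and IDs hypothetical):

// getPinnedWorkspacesByDocument: filename -> all workspaceIds pinning it
// { "my-doc.json": [1, 2] }
// getWatchedDocumentFilenames: filename -> a workspaceId watching it
// { "my-doc.json": 1 }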

View File

@ -1,22 +1,25 @@
// Helpers that convert workspace chats to some supported format
// for external use by the user.
const { Workspace } = require("../../../models/workspace");
const { WorkspaceChats } = require("../../../models/workspaceChats");
const { EmbedChats } = require("../../../models/embedChats");
const { safeJsonParse } = require("../../http");
async function convertToCSV(preparedData) {
const rows = ["id,username,workspace,prompt,response,sent_at,rating"];
const headers = new Set(["id", "workspace", "prompt", "response", "sent_at"]);
preparedData.forEach((item) =>
Object.keys(item).forEach((key) => headers.add(key))
);
const rows = [Array.from(headers).join(",")];
for (const item of preparedData) {
const record = [
item.id,
escapeCsv(item.username),
escapeCsv(item.workspace),
escapeCsv(item.prompt),
escapeCsv(item.response),
item.sent_at,
item.feedback,
].join(",");
const record = Array.from(headers)
.map((header) => {
const value = item[header] ?? "";
return escapeCsv(String(value));
})
.join(",");
rows.push(record);
}
return rows.join("\n");
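
With headers collected dynamically, a workspace export now yields rows like the following (values hypothetical; every data cell passes through escapeCsv):

id,workspace,prompt,response,sent_at,username,rating
"1","Default Workspace","What is AnythingLLM?","AnythingLLM is an all-in-one AI app.","2024-09-25T21:00:00.000Z","API","--"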
@ -37,29 +40,56 @@ async function convertToJSONL(workspaceChatsMap) {
.join("\n");
}
async function prepareWorkspaceChatsForExport(format = "jsonl") {
async function prepareChatsForExport(format = "jsonl", chatType = "workspace") {
if (!exportMap.hasOwnProperty(format))
throw new Error("Invalid export type.");
throw new Error(`Invalid export type: ${format}`);
const chats = await WorkspaceChats.whereWithData({}, null, null, {
let chats;
if (chatType === "workspace") {
chats = await WorkspaceChats.whereWithData({}, null, null, {
id: "asc",
});
} else if (chatType === "embed") {
chats = await EmbedChats.whereWithEmbedAndWorkspace(
{},
null,
{
id: "asc",
},
null
);
} else {
throw new Error(`Invalid chat type: ${chatType}`);
}
if (format === "csv" || format === "json") {
const preparedData = chats.map((chat) => {
const responseJson = JSON.parse(chat.response);
return {
const baseData = {
id: chat.id,
prompt: chat.prompt,
response: responseJson.text,
sent_at: chat.createdAt,
};
if (chatType === "embed") {
return {
...baseData,
workspace: chat.embed_config
? chat.embed_config.workspace.name
: "unknown workspace",
};
}
return {
...baseData,
workspace: chat.workspace ? chat.workspace.name : "unknown workspace",
username: chat.user
? chat.user.username
: chat.api_session_id !== null
? "API"
: "unknown user",
workspace: chat.workspace ? chat.workspace.name : "unknown workspace",
prompt: chat.prompt,
response: responseJson.text,
sent_at: chat.createdAt,
feedback:
rating:
chat.feedbackScore === null
? "--"
: chat.feedbackScore
@ -71,22 +101,13 @@ async function prepareWorkspaceChatsForExport(format = "jsonl") {
return preparedData;
}
const workspaceIds = [...new Set(chats.map((chat) => chat.workspaceId))];
const workspacesWithPrompts = await Promise.all(
workspaceIds.map((id) => Workspace.get({ id: Number(id) }))
);
const workspacePromptsMap = workspacesWithPrompts.reduce((acc, workspace) => {
acc[workspace.id] = workspace.openAiPrompt;
return acc;
}, {});
if (format === "jsonAlpaca") {
const preparedData = chats.map((chat) => {
const responseJson = JSON.parse(chat.response);
return {
instruction: buildSystemPrompt(
chat,
workspacePromptsMap[chat.workspaceId]
chat.workspace ? chat.workspace.openAiPrompt : null
),
input: chat.prompt,
output: responseJson.text,
@ -106,7 +127,7 @@ async function prepareWorkspaceChatsForExport(format = "jsonl") {
{
role: "system",
content:
workspacePromptsMap[workspaceId] ||
chat.workspace?.openAiPrompt ||
"Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed.",
},
],
@ -150,16 +171,18 @@ const exportMap = {
};
function escapeCsv(str) {
if (str === null || str === undefined) return '""';
return `"${str.replace(/"/g, '""').replace(/\n/g, " ")}"`;
}
async function exportChatsAsType(workspaceChatsMap, format = "jsonl") {
async function exportChatsAsType(format = "jsonl", chatType = "workspace") {
const { contentType, func } = exportMap.hasOwnProperty(format)
? exportMap[format]
: exportMap.jsonl;
const chats = await prepareChatsForExport(format, chatType);
return {
contentType,
data: await func(workspaceChatsMap),
data: await func(chats),
};
}
@ -181,6 +204,6 @@ function buildSystemPrompt(chat, prompt = null) {
}
module.exports = {
prepareWorkspaceChatsForExport,
prepareChatsForExport,
exportChatsAsType,
};

View File

@ -22,6 +22,8 @@ function handleDefaultStreamResponseV2(response, stream, responseProps) {
const handleAbort = () => clientAbortedHandler(resolve, fullText);
response.on("close", handleAbort);
// Now handle the chunks from the streamed response and append to fullText.
try {
for await (const chunk of stream) {
const message = chunk?.choices?.[0];
const token = message?.delta?.content;
@ -58,6 +60,18 @@ function handleDefaultStreamResponseV2(response, stream, responseProps) {
break; // Break streaming when a valid finish_reason is first encountered
}
}
} catch (e) {
console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
writeResponseChunk(response, {
uuid,
type: "abort",
textResponse: null,
sources: [],
close: true,
error: e.message,
});
resolve(fullText); // Return what we currently have - if anything.
}
});
}
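
On a mid-stream failure the client now receives a final abort chunk instead of a hung connection, shaped like:

{
  "uuid": "<uuid of this response>",
  "type": "abort",
  "textResponse": null,
  "sources": [],
  "close": true,
  "error": "<message of the thrown streaming error>"
}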

View File

@ -1,6 +1,7 @@
const { fetchOpenRouterModels } = require("../AiProviders/openRouter");
const { perplexityModels } = require("../AiProviders/perplexity");
const { togetherAiModels } = require("../AiProviders/togetherAi");
const { fireworksAiModels } = require("../AiProviders/fireworksAi");
const { ElevenLabsTTS } = require("../TextToSpeech/elevenLabs");
const SUPPORT_CUSTOM_MODELS = [
"openai",
@ -8,6 +9,7 @@ const SUPPORT_CUSTOM_MODELS = [
"ollama",
"native-llm",
"togetherai",
"fireworksai",
"mistral",
"perplexity",
"openrouter",
@ -31,6 +33,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await ollamaAIModels(basePath);
case "togetherai":
return await getTogetherAiModels();
case "fireworksai":
return await getFireworksAiModels(apiKey);
case "mistral":
return await getMistralModels(apiKey);
case "native-llm":
@ -304,6 +308,21 @@ async function getTogetherAiModels() {
return { models, error: null };
}
async function getFireworksAiModels() {
const knownModels = fireworksAiModels();
if (Object.keys(knownModels).length === 0)
return { models: [], error: null };
const models = Object.values(knownModels).map((model) => {
return {
id: model.id,
organization: model.organization,
name: model.name,
};
});
return { models, error: null };
}
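
The result flattens the static MODELS table for the settings UI, e.g.:

{
  models: [
    {
      id: "accounts/fireworks/models/llama-v3p1-405b-instruct",
      organization: "Meta",
      name: "Llama 3.1 405B Instruct",
    },
    // ...one entry per model in MODELS
  ],
  error: null,
}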
async function getPerplexityModels() {
const knownModels = perplexityModels();
if (Object.keys(knownModels).length === 0)

View File

@ -120,6 +120,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "togetherai":
const { TogetherAiLLM } = require("../AiProviders/togetherAi");
return new TogetherAiLLM(embedder, model);
case "fireworksai":
const { FireworksAiLLM } = require("../AiProviders/fireworksAi");
return new FireworksAiLLM(embedder, model);
case "perplexity":
const { PerplexityLLM } = require("../AiProviders/perplexity");
return new PerplexityLLM(embedder, model);
@ -240,6 +243,9 @@ function getLLMProviderClass({ provider = null } = {}) {
case "togetherai":
const { TogetherAiLLM } = require("../AiProviders/togetherAi");
return TogetherAiLLM;
case "fireworksai":
const { FireworksAiLLM } = require("../AiProviders/fireworksAi");
return FireworksAiLLM;
case "perplexity":
const { PerplexityLLM } = require("../AiProviders/perplexity");
return PerplexityLLM;

View File

@ -350,6 +350,16 @@ const KEY_MAPPING = {
checks: [isNotEmpty],
},
// Fireworks AI Options
FireworksAiLLMApiKey: {
envKey: "FIREWORKS_AI_LLM_API_KEY",
checks: [isNotEmpty],
},
FireworksAiLLMModelPref: {
envKey: "FIREWORKS_AI_LLM_MODEL_PREF",
checks: [isNotEmpty],
},
// Perplexity Options
PerplexityApiKey: {
envKey: "PERPLEXITY_API_KEY",
@ -580,6 +590,7 @@ function supportedLLM(input = "") {
"ollama",
"native",
"togetherai",
"fireworksai",
"mistral",
"huggingface",
"perplexity",

View File

@ -122,7 +122,8 @@ async function canRespond(request, response, next) {
textResponse: null,
sources: [],
close: true,
error:
error: "Rate limit exceeded",
errorMsg:
"The quota for this chat has been reached. Try again later or contact the site owner.",
});
return;