Merge branch 'master' of github.com:Mintplex-Labs/anything-llm into render

timothycarambat 2024-05-10 15:03:10 -07:00
commit 6c1b5477e0
44 changed files with 2219 additions and 48 deletions

View File

@@ -15,19 +15,25 @@
"epub",
"GROQ",
"hljs",
"huggingface",
"inferencing",
"koboldcpp",
"Langchain",
"lmstudio",
"localai",
"mbox",
"Milvus",
"Mintplex",
"moderations",
"Ollama",
"Oobabooga",
"openai",
"opendocument",
"openrouter",
"Qdrant",
"Serper",
"textgenwebui",
"togetherai",
"vectordbs",
"Weaviate",
"Zilliz"

View File

@@ -27,7 +27,7 @@ Here you can find the scripts and known working process to run AnythingLLM outsi
4. Ensure that the `server/.env` file has _at least_ these keys to start. These values will persist and this file will be automatically written and managed after your first successful boot.
```
-STORAGE_DIR="/your/absolute/path/to/server/.env"
+STORAGE_DIR="/your/absolute/path/to/server/storage"
```
5. Edit the `frontend/.env` file for the `VITE_BASE_API` to now be set to `/api`. This is documented in the .env for which one you should use.
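For step 5, only the API base value changes; a minimal `frontend/.env` entry consistent with the step above would be:
```
VITE_BASE_API="/api"
```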

View File

@@ -61,6 +61,21 @@ export default function GenericOpenAiOptions({ settings }) {
autoComplete="off"
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Max Tokens
</label>
<input
type="number"
name="GenericOpenAiMaxTokens"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Max tokens per request (eg: 1024)"
min={1}
defaultValue={settings?.GenericOpenAiMaxTokens || 1024}
required={true}
autoComplete="off"
/>
</div>
</div>
);
}
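The new Max Tokens field persists through the `GenericOpenAiMaxTokens` setting, which the server maps to the `GENERIC_OPEN_AI_MAX_TOKENS` environment variable (see the `SystemSettings` and `GenericOpenAiLLM` changes below). In a bare-metal deployment that would translate to an entry such as:
```
GENERIC_OPEN_AI_MAX_TOKENS=1024
```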

View File

@@ -1,7 +1,6 @@
import React, { useEffect, useState } from "react";
import System from "../../../models/system";
import { AUTH_TOKEN, AUTH_USER } from "../../../utils/constants";
-import useLogo from "../../../hooks/useLogo";
import paths from "../../../utils/paths";
import showToast from "@/utils/toast";
import ModalWrapper from "@/components/ModalWrapper";
@@ -163,7 +162,6 @@ const ResetPasswordForm = ({ onSubmit }) => {
export default function MultiUserAuth() {
const [loading, setLoading] = useState(false);
const [error, setError] = useState(null);
-const { logo: _initLogo } = useLogo();
const [recoveryCodes, setRecoveryCodes] = useState([]);
const [downloadComplete, setDownloadComplete] = useState(false);
const [user, setUser] = useState(null);

View File

@@ -5,11 +5,19 @@ import {
ClipboardText,
ThumbsUp,
ThumbsDown,
ArrowsClockwise,
} from "@phosphor-icons/react";
import { Tooltip } from "react-tooltip";
import Workspace from "@/models/workspace";
-const Actions = ({ message, feedbackScore, chatId, slug }) => {
const Actions = ({
message,
feedbackScore,
chatId,
slug,
isLastMessage,
regenerateMessage,
}) => {
const [selectedFeedback, setSelectedFeedback] = useState(feedbackScore);
const handleFeedback = async (newFeedback) => {
@@ -22,6 +30,14 @@ const Actions = ({ message, feedbackScore, chatId, slug }) => {
return (
<div className="flex justify-start items-center gap-x-4">
<CopyMessage message={message} />
{isLastMessage &&
!message?.includes("Workspace chat memory was reset!") && (
<RegenerateMessage
regenerateMessage={regenerateMessage}
slug={slug}
chatId={chatId}
/>
)}
{chatId && (
<>
<FeedbackButton
@@ -106,4 +122,26 @@ function CopyMessage({ message }) {
);
}
function RegenerateMessage({ regenerateMessage, chatId }) {
return (
<div className="mt-3 relative">
<button
onClick={() => regenerateMessage(chatId)}
data-tooltip-id="regenerate-assistant-text"
data-tooltip-content="Regenerate response"
className="border-none text-zinc-300"
aria-label="Regenerate"
>
<ArrowsClockwise size={18} className="mb-1" weight="fill" />
</button>
<Tooltip
id="regenerate-assistant-text"
place="bottom"
delayShow={300}
className="tooltip !text-xs"
/>
</div>
);
}
export default memo(Actions);

View File

@@ -19,6 +19,8 @@ const HistoricalMessage = ({
error = false,
feedbackScore = null,
chatId = null,
isLastMessage = false,
regenerateMessage,
}) => {
return (
<div
@@ -59,6 +61,8 @@
feedbackScore={feedbackScore}
chatId={chatId}
slug={workspace?.slug}
isLastMessage={isLastMessage}
regenerateMessage={regenerateMessage}
/>
</div>
)}
@@ -92,4 +96,17 @@ function ProfileImage({ role, workspace }) {
);
}
-export default memo(HistoricalMessage);
export default memo(
HistoricalMessage,
// Skip re-rendering the historical message when the content is exactly
// the same (not streaming), the last-message status is unchanged
// (regenerate icon), and the chatId matches between renders (feedback icons).
(prevProps, nextProps) => {
return (
prevProps.message === nextProps.message &&
prevProps.isLastMessage === nextProps.isLastMessage &&
prevProps.chatId === nextProps.chatId
);
}
);

View File

@@ -8,7 +8,12 @@ import debounce from "lodash.debounce";
import useUser from "@/hooks/useUser";
import Chartable from "./Chartable";
-export default function ChatHistory({ history = [], workspace, sendCommand }) {
export default function ChatHistory({
history = [],
workspace,
sendCommand,
regenerateAssistantMessage,
}) {
const { user } = useUser();
const { showing, showModal, hideModal } = useManageWorkspaceModal();
const [isAtBottom, setIsAtBottom] = useState(true);
@@ -165,6 +170,8 @@ export default function ChatHistory({ history = [], workspace, sendCommand }) {
feedbackScore={props.feedbackScore}
chatId={props.chatId}
error={props.error}
regenerateMessage={regenerateAssistantMessage}
isLastMessage={isLastBotReply}
/>
);
})}

View File

@@ -161,10 +161,6 @@ function FirstTimeAgentUser() {
Now you can use agents for real-time web search and scraping,
saving documents to your browser, summarizing documents, and
more.
-<br />
-<br />
-Currently, agents only work with OpenAI as your agent LLM. All
-LLM providers will be supported in the future.
</p>
<p className="text-green-300/60 text-xs md:text-sm">
This feature is currently early access and fully custom agents

View File

@@ -0,0 +1,111 @@
import { useState } from "react";
import { X } from "@phosphor-icons/react";
import ModalWrapper from "@/components/ModalWrapper";
import { CMD_REGEX } from ".";
export default function AddPresetModal({ isOpen, onClose, onSave }) {
const [command, setCommand] = useState("");
const handleSubmit = async (e) => {
e.preventDefault();
const form = new FormData(e.target);
const sanitizedCommand = command.replace(CMD_REGEX, "");
const saved = await onSave({
command: `/${sanitizedCommand}`,
prompt: form.get("prompt"),
description: form.get("description"),
});
if (saved) setCommand("");
};
const handleCommandChange = (e) => {
const value = e.target.value.replace(CMD_REGEX, "");
setCommand(value);
};
return (
<ModalWrapper isOpen={isOpen}>
<form
onSubmit={handleSubmit}
className="relative w-full max-w-2xl max-h-full"
>
<div className="relative bg-main-gradient rounded-lg shadow">
<div className="flex items-start justify-between p-4 border-b rounded-t border-gray-500/50">
<h3 className="text-xl font-semibold text-white">Add New Preset</h3>
<button
onClick={onClose}
type="button"
className="transition-all duration-300 text-gray-400 bg-transparent hover:border-white/60 rounded-lg text-sm p-1.5 ml-auto inline-flex items-center bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
>
<X className="text-gray-300 text-lg" />
</button>
</div>
<div className="p-6 space-y-6 flex h-full w-full">
<div className="w-full flex flex-col gap-y-4">
<div>
<label className="block mb-2 text-sm font-medium text-white">
Command
</label>
<div className="flex items-center">
<span className="text-white text-sm mr-2 font-bold">/</span>
<input
name="command"
type="text"
placeholder="your-command"
value={command}
onChange={handleCommandChange}
maxLength={25}
autoComplete="off"
required={true}
className="border-none bg-zinc-900 placeholder:text-white/20 border-gray-500 text-white text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5"
/>
</div>
</div>
<div>
<label className="block mb-2 text-sm font-medium text-white">
Prompt
</label>
<textarea
name="prompt"
autoComplete="off"
placeholder="This is the content that will be injected in front of your prompt."
required={true}
className="border-none bg-zinc-900 placeholder:text-white/20 border-gray-500 text-white text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5"
></textarea>
</div>
<div>
<label className="border-none block mb-2 text-sm font-medium text-white">
Description
</label>
<input
type="text"
name="description"
placeholder="Responds with a poem about LLMs."
maxLength={80}
autoComplete="off"
required={true}
className="border-none bg-zinc-900 placeholder:text-white/20 border-gray-500 text-white text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5"
/>
</div>
</div>
</div>
<div className="flex w-full justify-between items-center p-6 space-x-2 border-t rounded-b border-gray-500/50">
<button
onClick={onClose}
type="button"
className="px-4 py-2 rounded-lg text-white hover:bg-stone-900 transition-all duration-300"
>
Cancel
</button>
<button
type="submit"
className="transition-all duration-300 border border-slate-200 px-4 py-2 rounded-lg text-white text-sm items-center flex gap-x-2 hover:bg-slate-200 hover:text-slate-800 focus:ring-gray-800"
>
Save
</button>
</div>
</div>
</form>
</ModalWrapper>
);
}

View File

@@ -0,0 +1,148 @@
import { useState } from "react";
import { X } from "@phosphor-icons/react";
import ModalWrapper from "@/components/ModalWrapper";
import { CMD_REGEX } from ".";
export default function EditPresetModal({
isOpen,
onClose,
onSave,
onDelete,
preset,
}) {
const [command, setCommand] = useState(preset?.command?.slice(1) || "");
const [deleting, setDeleting] = useState(false);
const handleSubmit = (e) => {
e.preventDefault();
const form = new FormData(e.target);
const sanitizedCommand = command.replace(CMD_REGEX, "");
onSave({
id: preset.id,
command: `/${sanitizedCommand}`,
prompt: form.get("prompt"),
description: form.get("description"),
});
};
const handleCommandChange = (e) => {
const value = e.target.value.replace(CMD_REGEX, "");
setCommand(value);
};
const handleDelete = async () => {
const confirmDelete = window.confirm(
"Are you sure you want to delete this preset?"
);
if (!confirmDelete) return;
setDeleting(true);
await onDelete(preset.id);
setDeleting(false);
onClose();
};
return (
<ModalWrapper isOpen={isOpen}>
<form
onSubmit={handleSubmit}
className="relative w-full max-w-2xl max-h-full"
>
<div className="relative bg-main-gradient rounded-lg shadow">
<div className="flex items-start justify-between p-4 border-b rounded-t border-gray-500/50">
<h3 className="text-xl font-semibold text-white">Edit Preset</h3>
<button
onClick={onClose}
type="button"
className="transition-all duration-300 text-gray-400 bg-transparent hover:border-white/60 rounded-lg text-sm p-1.5 ml-auto inline-flex items-center bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
>
<X className="text-gray-300 text-lg" />
</button>
</div>
<div className="p-6 space-y-6 flex h-full w-full">
<div className="w-full flex flex-col gap-y-4">
<div>
<label
htmlFor="command"
className="block mb-2 text-sm font-medium text-white"
>
Command
</label>
<div className="flex items-center">
<span className="text-white text-sm mr-2 font-bold">/</span>
<input
type="text"
name="command"
placeholder="your-command"
value={command}
onChange={handleCommandChange}
required={true}
className="border-none bg-zinc-900 placeholder:text-white/20 border-gray-500 text-white text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5"
/>
</div>
</div>
<div>
<label
htmlFor="prompt"
className="block mb-2 text-sm font-medium text-white"
>
Prompt
</label>
<textarea
name="prompt"
placeholder="This is a test prompt. Please respond with a poem about LLMs."
defaultValue={preset.prompt}
required={true}
className="border-none bg-zinc-900 placeholder:text-white/20 border-gray-500 text-white text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5"
></textarea>
</div>
<div>
<label
htmlFor="description"
className="block mb-2 text-sm font-medium text-white"
>
Description
</label>
<input
type="text"
name="description"
defaultValue={preset.description}
placeholder="Responds with a poem about LLMs."
required={true}
className="border-none bg-zinc-900 placeholder:text-white/20 border-gray-500 text-white text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5"
/>
</div>
</div>
</div>
<div className="flex w-full justify-between items-center p-6 space-x-2 border-t rounded-b border-gray-500/50">
<div className="flex flex-col space-y-2">
<button
disabled={deleting}
onClick={handleDelete}
type="button"
className="px-4 py-2 rounded-lg text-red-500 hover:bg-red-500/25 transition-all duration-300 disabled:opacity-50"
>
{deleting ? "Deleting..." : "Delete Preset"}
</button>
</div>
<div className="flex space-x-2">
<button
onClick={onClose}
type="button"
className="px-4 py-2 rounded-lg text-white hover:bg-stone-900 transition-all duration-300"
>
Cancel
</button>
<button
type="submit"
className="transition-all duration-300 border border-slate-200 px-4 py-2 rounded-lg text-white text-sm items-center flex gap-x-2 hover:bg-slate-200 hover:text-slate-800 focus:ring-gray-800"
>
Save
</button>
</div>
</div>
</div>
</form>
</ModalWrapper>
);
}

View File

@@ -0,0 +1,127 @@
import { useEffect, useState } from "react";
import { useIsAgentSessionActive } from "@/utils/chat/agent";
import AddPresetModal from "./AddPresetModal";
import EditPresetModal from "./EditPresetModal";
import { useModal } from "@/hooks/useModal";
import System from "@/models/system";
import { DotsThree, Plus } from "@phosphor-icons/react";
import showToast from "@/utils/toast";
export const CMD_REGEX = new RegExp(/[^a-zA-Z0-9_-]/g);
export default function SlashPresets({ setShowing, sendCommand }) {
const isActiveAgentSession = useIsAgentSessionActive();
const {
isOpen: isAddModalOpen,
openModal: openAddModal,
closeModal: closeAddModal,
} = useModal();
const {
isOpen: isEditModalOpen,
openModal: openEditModal,
closeModal: closeEditModal,
} = useModal();
const [presets, setPresets] = useState([]);
const [selectedPreset, setSelectedPreset] = useState(null);
useEffect(() => {
fetchPresets();
}, []);
if (isActiveAgentSession) return null;
const fetchPresets = async () => {
const presets = await System.getSlashCommandPresets();
setPresets(presets);
};
const handleSavePreset = async (preset) => {
const { error } = await System.createSlashCommandPreset(preset);
if (!!error) {
showToast(error, "error");
return false;
}
fetchPresets();
closeAddModal();
return true;
};
const handleEditPreset = (preset) => {
setSelectedPreset(preset);
openEditModal();
};
const handleUpdatePreset = async (updatedPreset) => {
const { error } = await System.updateSlashCommandPreset(
updatedPreset.id,
updatedPreset
);
if (!!error) {
showToast(error, "error");
return;
}
fetchPresets();
closeEditModal();
};
const handleDeletePreset = async (presetId) => {
await System.deleteSlashCommandPreset(presetId);
fetchPresets();
closeEditModal();
};
return (
<>
{presets.map((preset) => (
<button
key={preset.id}
onClick={() => {
setShowing(false);
sendCommand(`${preset.command} `, false);
}}
className="w-full hover:cursor-pointer hover:bg-zinc-700 px-2 py-2 rounded-xl flex flex-row justify-start"
>
<div className="w-full flex-col text-left flex pointer-events-none">
<div className="text-white text-sm font-bold">{preset.command}</div>
<div className="text-white text-opacity-60 text-sm">
{preset.description}
</div>
</div>
<button
onClick={(e) => {
e.stopPropagation();
handleEditPreset(preset);
}}
className="text-white text-sm p-1 hover:cursor-pointer hover:bg-zinc-900 rounded-full mt-1"
>
<DotsThree size={24} weight="bold" />
</button>
</button>
))}
<button
onClick={openAddModal}
className="w-full hover:cursor-pointer hover:bg-zinc-700 px-2 py-1 rounded-xl flex flex-col justify-start"
>
<div className="w-full flex-row flex pointer-events-none items-center gap-2">
<Plus size={24} weight="fill" fill="white" />
<div className="text-white text-sm font-medium">Add New Preset </div>
</div>
</button>
<AddPresetModal
isOpen={isAddModalOpen}
onClose={closeAddModal}
onSave={handleSavePreset}
/>
{selectedPreset && (
<EditPresetModal
isOpen={isEditModalOpen}
onClose={closeEditModal}
onSave={handleUpdatePreset}
onDelete={handleDeletePreset}
preset={selectedPreset}
/>
)}
</>
);
}

View File

@@ -3,6 +3,7 @@ import SlashCommandIcon from "./icons/slash-commands-icon.svg";
import { Tooltip } from "react-tooltip";
import ResetCommand from "./reset";
import EndAgentSession from "./endAgentSession";
import SlashPresets from "./SlashPresets";

export default function SlashCommandsButton({ showing, setShowSlashCommand }) {
return (
@@ -52,10 +53,11 @@ export function SlashCommands({ showing, setShowing, sendCommand }) {
<div className="w-full flex justify-center absolute bottom-[130px] md:bottom-[150px] left-0 z-10 px-4">
<div
ref={cmdRef}
-className="w-[600px] p-2 bg-zinc-800 rounded-2xl shadow flex-col justify-center items-start gap-2.5 inline-flex"
+className="w-[600px] overflow-auto p-2 bg-zinc-800 rounded-2xl shadow flex-col justify-center items-start gap-2.5 inline-flex"
>
<ResetCommand sendCommand={sendCommand} setShowing={setShowing} />
<EndAgentSession sendCommand={sendCommand} setShowing={setShowing} />
<SlashPresets sendCommand={sendCommand} setShowing={setShowing} />
</div>
</div>
</div>

View File

@@ -12,20 +12,35 @@ import AvailableAgentsButton, {
useAvailableAgents,
} from "./AgentMenu";
import TextSizeButton from "./TextSizeMenu";
export const PROMPT_INPUT_EVENT = "set_prompt_input";
export default function PromptInput({
-message,
submit,
onChange,
inputDisabled,
buttonDisabled,
sendCommand,
}) {
const [promptInput, setPromptInput] = useState("");
const { showAgents, setShowAgents } = useAvailableAgents();
const { showSlashCommand, setShowSlashCommand } = useSlashCommands();
const formRef = useRef(null);
const textareaRef = useRef(null);
const [_, setFocused] = useState(false);
// To prevent too many re-renders we remotely listen for updates from the parent
// via an event cycle. Otherwise, using message as a prop leads to a re-render every
// change on the input.
function handlePromptUpdate(e) {
setPromptInput(e?.detail ?? "");
}
useEffect(() => {
if (!!window)
window.addEventListener(PROMPT_INPUT_EVENT, handlePromptUpdate);
return () =>
window?.removeEventListener(PROMPT_INPUT_EVENT, handlePromptUpdate);
}, []);
useEffect(() => {
if (!inputDisabled && textareaRef.current) {
textareaRef.current.focus();
@@ -102,6 +117,7 @@ export default function PromptInput({
watchForSlash(e);
watchForAt(e);
adjustTextArea(e);
setPromptInput(e.target.value);
}}
onKeyDown={captureEnter}
required={true}
@@ -111,7 +127,7 @@
setFocused(false);
adjustTextArea(e);
}}
-value={message}
+value={promptInput}
className="cursor-text max-h-[100px] md:min-h-[40px] mx-2 md:mx-0 py-2 w-full text-[16px] md:text-md text-white bg-transparent placeholder:text-white/60 resize-none active:outline-none focus:outline-none flex-grow"
placeholder={"Send a message"}
/>

View File

@@ -1,6 +1,6 @@
import { useState, useEffect } from "react";
import ChatHistory from "./ChatHistory";
-import PromptInput from "./PromptInput";
+import PromptInput, { PROMPT_INPUT_EVENT } from "./PromptInput";
import Workspace from "@/models/workspace";
import handleChat, { ABORT_STREAM_EVENT } from "@/utils/chat";
import { isMobile } from "react-device-detect";
@@ -20,10 +20,21 @@ export default function ChatContainer({ workspace, knownHistory = [] }) {
const [chatHistory, setChatHistory] = useState(knownHistory);
const [socketId, setSocketId] = useState(null);
const [websocket, setWebsocket] = useState(null);
// Maintain state of message from whatever is in PromptInput
const handleMessageChange = (event) => {
setMessage(event.target.value);
};
// Emit an update to the state of the prompt input without directly
// passing a prop in so that it does not re-render constantly.
function setMessageEmit(messageContent = "") {
setMessage(messageContent);
window.dispatchEvent(
new CustomEvent(PROMPT_INPUT_EVENT, { detail: messageContent })
);
}
const handleSubmit = async (event) => {
event.preventDefault();
if (!message || message === "") return false;
@@ -41,31 +52,54 @@ export default function ChatContainer({ workspace, knownHistory = [] }) {
];
setChatHistory(prevChatHistory);
-setMessage("");
+setMessageEmit("");
setLoadingResponse(true);
};

-const sendCommand = async (command, submit = false) => {
const regenerateAssistantMessage = (chatId) => {
const updatedHistory = chatHistory.slice(0, -1);
const lastUserMessage = updatedHistory.slice(-1)[0];
Workspace.deleteChats(workspace.slug, [chatId])
.then(() => sendCommand(lastUserMessage.content, true, updatedHistory))
.catch((e) => console.error(e));
};

const sendCommand = async (command, submit = false, history = []) => {
if (!command || command === "") return false;
if (!submit) {
-setMessage(command);
+setMessageEmit(command);
return;
}

-const prevChatHistory = [
-...chatHistory,
-{ content: command, role: "user" },
-{
-content: "",
-role: "assistant",
-pending: true,
-userMessage: command,
-animate: true,
-},
-];
let prevChatHistory;
if (history.length > 0) {
// use pre-determined history chain.
prevChatHistory = [
...history,
{
content: "",
role: "assistant",
pending: true,
userMessage: command,
animate: true,
},
];
} else {
prevChatHistory = [
...chatHistory,
{ content: command, role: "user" },
{
content: "",
role: "assistant",
pending: true,
userMessage: command,
animate: true,
},
];
}

setChatHistory(prevChatHistory);
-setMessage("");
+setMessageEmit("");
setLoadingResponse(true);
};
@@ -206,9 +240,9 @@ export default function ChatContainer({ workspace, knownHistory = [] }) {
history={chatHistory}
workspace={workspace}
sendCommand={sendCommand}
regenerateAssistantMessage={regenerateAssistantMessage}
/>
<PromptInput
-message={message}
submit={handleSubmit}
onChange={handleMessageChange}
inputDisabled={loadingResponse}
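Taken together, the PromptInput and ChatContainer changes replace the `message` prop with a window-level CustomEvent so keystrokes no longer re-render the whole chat tree. A minimal standalone sketch of that pattern, with a hypothetical `usePromptInput` hook standing in for the inline listener above:
```
import { useState, useEffect } from "react";

const PROMPT_INPUT_EVENT = "set_prompt_input";

// Emitter side: broadcast the new value instead of passing it down as a prop.
function setMessageEmit(messageContent = "") {
  window.dispatchEvent(
    new CustomEvent(PROMPT_INPUT_EVENT, { detail: messageContent })
  );
}

// Listener side: only the subscribing component re-renders on updates.
function usePromptInput() {
  const [promptInput, setPromptInput] = useState("");
  useEffect(() => {
    const handler = (e) => setPromptInput(e?.detail ?? "");
    window.addEventListener(PROMPT_INPUT_EVENT, handler);
    return () => window.removeEventListener(PROMPT_INPUT_EVENT, handler);
  }, []);
  return promptInput;
}
```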

View File

@@ -567,6 +567,74 @@ const System = {
});
},
dataConnectors: DataConnector,
getSlashCommandPresets: async function () {
return await fetch(`${API_BASE}/system/slash-command-presets`, {
method: "GET",
headers: baseHeaders(),
})
.then((res) => {
if (!res.ok) throw new Error("Could not fetch slash command presets.");
return res.json();
})
.then((res) => res.presets)
.catch((e) => {
console.error(e);
return [];
});
},
createSlashCommandPreset: async function (presetData) {
return await fetch(`${API_BASE}/system/slash-command-presets`, {
method: "POST",
headers: baseHeaders(),
body: JSON.stringify(presetData),
})
.then((res) => {
if (!res.ok) throw new Error("Could not create slash command preset.");
return res.json();
})
.then((res) => {
return { preset: res.preset, error: null };
})
.catch((e) => {
console.error(e);
return { preset: null, error: e.message };
});
},
updateSlashCommandPreset: async function (presetId, presetData) {
return await fetch(`${API_BASE}/system/slash-command-presets/${presetId}`, {
method: "POST",
headers: baseHeaders(),
body: JSON.stringify(presetData),
})
.then((res) => {
if (!res.ok) throw new Error("Could not update slash command preset.");
return res.json();
})
.then((res) => {
return { preset: res.preset, error: null };
})
.catch((e) => {
return { preset: null, error: "Failed to update this command." };
});
},
deleteSlashCommandPreset: async function (presetId) {
return await fetch(`${API_BASE}/system/slash-command-presets/${presetId}`, {
method: "DELETE",
headers: baseHeaders(),
})
.then((res) => {
if (!res.ok) throw new Error("Could not delete slash command preset.");
return true;
})
.catch((e) => {
console.error(e);
return false;
});
},
};

export default System;
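These wrappers are consumed exactly as the SlashPresets component does above; an illustrative call mirroring `handleSavePreset`:
```
const { error } = await System.createSlashCommandPreset({
  command: "/poem",
  prompt: "Respond with a poem about LLMs.",
  description: "Responds with a poem about LLMs.",
});
if (!!error) showToast(error, "error");
```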

View File

@@ -74,6 +74,22 @@ const Workspace = {
.catch(() => false);
return result;
},
deleteChats: async function (slug = "", chatIds = []) {
return await fetch(`${API_BASE}/workspace/${slug}/delete-chats`, {
method: "DELETE",
headers: baseHeaders(),
body: JSON.stringify({ chatIds }),
})
.then((res) => {
if (res.ok) return true;
throw new Error("Failed to delete chats.");
})
.catch((e) => {
console.log(e);
return false;
});
},
streamChat: async function ({ slug }, message, handleChat) {
const ctrl = new AbortController();

View File

@@ -5,8 +5,37 @@ import { AVAILABLE_LLM_PROVIDERS } from "@/pages/GeneralSettings/LLMPreference";
import { CaretUpDown, Gauge, MagnifyingGlass, X } from "@phosphor-icons/react";
import AgentModelSelection from "../AgentModelSelection";
-const ENABLED_PROVIDERS = ["openai", "anthropic", "lmstudio", "ollama"];
-const WARN_PERFORMANCE = ["lmstudio", "ollama"];
const ENABLED_PROVIDERS = [
"openai",
"anthropic",
"lmstudio",
"ollama",
"localai",
"groq",
"azure",
"koboldcpp",
"togetherai",
"openrouter",
"mistral",
"perplexity",
"textgenwebui",
// TODO: More agent support.
// "generic-openai", // Need to support text-input for agent model input for this to be enabled.
// "cohere", // Has tool calling and will need to build explicit support
// "huggingface" // Can be done but already has issues with no-chat templated. Needs to be tested.
// "gemini", // Too rate limited and broken in several ways to use for agents.
];
const WARN_PERFORMANCE = [
"lmstudio",
"groq",
"azure",
"koboldcpp",
"ollama",
"localai",
"openrouter",
"generic-openai",
"textgenwebui",
];
const LLM_DEFAULT = {
name: "Please make a selection",

View File

@@ -55,6 +55,7 @@ const {
resetPassword,
generateRecoveryCodes,
} = require("../utils/PasswordRecovery");
const { SlashCommandPresets } = require("../models/slashCommandsPresets");

function systemEndpoints(app) {
if (!app) return;
@@ -1049,6 +1050,111 @@
response.sendStatus(500).end();
}
});
app.get(
"/system/slash-command-presets",
[validatedRequest, flexUserRoleValid([ROLES.all])],
async (request, response) => {
try {
const user = await userFromSession(request, response);
const userPresets = await SlashCommandPresets.getUserPresets(user?.id);
response.status(200).json({ presets: userPresets });
} catch (error) {
console.error("Error fetching slash command presets:", error);
response.status(500).json({ message: "Internal server error" });
}
}
);
app.post(
"/system/slash-command-presets",
[validatedRequest, flexUserRoleValid([ROLES.all])],
async (request, response) => {
try {
const user = await userFromSession(request, response);
const { command, prompt, description } = reqBody(request);
const presetData = {
command: SlashCommandPresets.formatCommand(String(command)),
prompt: String(prompt),
description: String(description),
};
const preset = await SlashCommandPresets.create(user?.id, presetData);
if (!preset) {
return response
.status(500)
.json({ message: "Failed to create preset" });
}
response.status(201).json({ preset });
} catch (error) {
console.error("Error creating slash command preset:", error);
response.status(500).json({ message: "Internal server error" });
}
}
);
app.post(
"/system/slash-command-presets/:slashCommandId",
[validatedRequest, flexUserRoleValid([ROLES.all])],
async (request, response) => {
try {
const user = await userFromSession(request, response);
const { slashCommandId } = request.params;
const { command, prompt, description } = reqBody(request);
// Validate that the requesting user owns this preset (session already validated).
const ownsPreset = await SlashCommandPresets.get({
userId: user?.id ?? null,
id: Number(slashCommandId),
});
if (!ownsPreset)
return response.status(404).json({ message: "Preset not found" });
const updates = {
command: SlashCommandPresets.formatCommand(String(command)),
prompt: String(prompt),
description: String(description),
};
const preset = await SlashCommandPresets.update(
Number(slashCommandId),
updates
);
if (!preset) return response.sendStatus(422);
response.status(200).json({ preset: { ...ownsPreset, ...updates } });
} catch (error) {
console.error("Error updating slash command preset:", error);
response.status(500).json({ message: "Internal server error" });
}
}
);
app.delete(
"/system/slash-command-presets/:slashCommandId",
[validatedRequest, flexUserRoleValid([ROLES.all])],
async (request, response) => {
try {
const { slashCommandId } = request.params;
const user = await userFromSession(request, response);
// Validate that the requesting user owns this preset (session already validated).
const ownsPreset = await SlashCommandPresets.get({
userId: user?.id ?? null,
id: Number(slashCommandId),
});
if (!ownsPreset)
return response
.status(403)
.json({ message: "Failed to delete preset" });
await SlashCommandPresets.delete(Number(slashCommandId));
response.sendStatus(204);
} catch (error) {
console.error("Error deleting slash command preset:", error);
response.status(500).json({ message: "Internal server error" });
}
}
);
}

module.exports = { systemEndpoints };
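With a valid session, the new routes can be exercised directly; an illustrative fetch against the create route (assuming the default `/api` prefix, where `token` is a valid session token):
```
const res = await fetch("/api/system/slash-command-presets", {
  method: "POST",
  headers: {
    Authorization: `Bearer ${token}`,
    "Content-Type": "application/json",
  },
  body: JSON.stringify({
    command: "/poem",
    prompt: "Respond with a poem about LLMs.",
    description: "Responds with a poem about LLMs.",
  }),
});
const { preset } = await res.json(); // 201 on success
```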

View File

@@ -372,6 +372,37 @@ function workspaceEndpoints(app) {
}
);
app.delete(
"/workspace/:slug/delete-chats",
[validatedRequest, flexUserRoleValid([ROLES.all]), validWorkspaceSlug],
async (request, response) => {
try {
const { chatIds = [] } = reqBody(request);
const user = await userFromSession(request, response);
const workspace = response.locals.workspace;
if (!workspace || !Array.isArray(chatIds)) {
response.sendStatus(400).end();
return;
}
// This works for both workspace and threads.
// we simplify this by just looking at workspace<>user overlap
// since they are all on the same table.
await WorkspaceChats.delete({
id: { in: chatIds.map((id) => Number(id)) },
user_id: user?.id ?? null,
workspaceId: workspace.id,
});
response.sendStatus(200).end();
} catch (e) {
console.log(e.message, e);
response.sendStatus(500).end();
}
}
);
app.post(
"/workspace/:slug/chat-feedback/:chatId",
[validatedRequest, flexUserRoleValid([ROLES.all]), validWorkspaceSlug],

View File

@@ -0,0 +1,105 @@
const { v4 } = require("uuid");
const prisma = require("../utils/prisma");
const CMD_REGEX = new RegExp(/[^a-zA-Z0-9_-]/g);
const SlashCommandPresets = {
formatCommand: function (command = "") {
if (!command || command.length < 2) return `/${v4().split("-")[0]}`;
let adjustedCmd = command.toLowerCase(); // force lowercase
if (!adjustedCmd.startsWith("/")) adjustedCmd = `/${adjustedCmd}`; // Fix if no preceding / is found.
return `/${adjustedCmd.slice(1).toLowerCase().replace(CMD_REGEX, "-")}`; // replace any invalid chars with '-'
},
get: async function (clause = {}) {
try {
const preset = await prisma.slash_command_presets.findFirst({
where: clause,
});
return preset || null;
} catch (error) {
console.error(error.message);
return null;
}
},
where: async function (clause = {}, limit) {
try {
const presets = await prisma.slash_command_presets.findMany({
where: clause,
take: limit || undefined,
});
return presets;
} catch (error) {
console.error(error.message);
return [];
}
},
// Command + userId must be unique combination.
create: async function (userId = null, presetData = {}) {
try {
const preset = await prisma.slash_command_presets.create({
data: {
...presetData,
// This field (uid) is either the user_id or 0 (for non-multi-user mode).
// the UID field enforces the @@unique(userId, command) constraint since
// the real relational field (userId) cannot be non-null so this 'dummy' field gives us something
// to constrain against within the context of prisma and sqlite that works.
uid: userId ? Number(userId) : 0,
userId: userId ? Number(userId) : null,
},
});
return preset;
} catch (error) {
console.error("Failed to create preset", error.message);
return null;
}
},
getUserPresets: async function (userId = null) {
try {
return (
await prisma.slash_command_presets.findMany({
where: { userId: !!userId ? Number(userId) : null },
orderBy: { createdAt: "asc" },
})
)?.map((preset) => ({
id: preset.id,
command: preset.command,
prompt: preset.prompt,
description: preset.description,
}));
} catch (error) {
console.error("Failed to get user presets", error.message);
return [];
}
},
update: async function (presetId = null, presetData = {}) {
try {
const preset = await prisma.slash_command_presets.update({
where: { id: Number(presetId) },
data: presetData,
});
return preset;
} catch (error) {
console.error("Failed to update preset", error.message);
return null;
}
},
delete: async function (presetId = null) {
try {
await prisma.slash_command_presets.delete({
where: { id: Number(presetId) },
});
return true;
} catch (error) {
console.error("Failed to delete preset", error.message);
return false;
}
},
};
module.exports.SlashCommandPresets = SlashCommandPresets;
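For intuition, `formatCommand` always yields a lowercase, slash-prefixed token with invalid characters replaced; illustrative inputs and outputs derived from the regex above:
```
SlashCommandPresets.formatCommand("My Command!");    // "/my-command-"
SlashCommandPresets.formatCommand("/Refactor_Code"); // "/refactor_code"
SlashCommandPresets.formatCommand("a");              // too short: random stub such as "/9b1deb4d"
```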

View File

@@ -377,6 +377,7 @@ const SystemSettings = {
GenericOpenAiModelPref: process.env.GENERIC_OPEN_AI_MODEL_PREF,
GenericOpenAiTokenLimit: process.env.GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT,
GenericOpenAiKey: !!process.env.GENERIC_OPEN_AI_API_KEY,
GenericOpenAiMaxTokens: process.env.GENERIC_OPEN_AI_MAX_TOKENS,
// Cohere API Keys
CohereApiKey: !!process.env.COHERE_API_KEY,

View File

@@ -0,0 +1,15 @@
-- CreateTable
CREATE TABLE "slash_command_presets" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"command" TEXT NOT NULL,
"prompt" TEXT NOT NULL,
"description" TEXT NOT NULL,
"uid" INTEGER NOT NULL DEFAULT 0,
"userId" INTEGER,
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"lastUpdatedAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "slash_command_presets_userId_fkey" FOREIGN KEY ("userId") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE
);
-- CreateIndex
CREATE UNIQUE INDEX "slash_command_presets_uid_command_key" ON "slash_command_presets"("uid", "command");
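The `("uid", "command")` unique index lets the same command exist once per user while still covering single-user mode, where `userId` stays NULL and `uid` falls back to 0. Both of these illustrative calls to the model's `create()` above would succeed, because `uid` differs:
```
// single-user mode: uid falls back to 0, userId stays null
await SlashCommandPresets.create(null, {
  command: "/poem",
  prompt: "Respond with a poem about LLMs.",
  description: "Poem preset",
});
// multi-user mode: the same command, scoped to user id 2
await SlashCommandPresets.create(2, {
  command: "/poem",
  prompt: "Respond with a poem about LLMs.",
  description: "Poem preset",
});
```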

View File

@@ -73,6 +73,7 @@ model users {
recovery_codes recovery_codes[]
password_reset_tokens password_reset_tokens[]
workspace_agent_invocations workspace_agent_invocations[]
slash_command_presets slash_command_presets[]
}

model recovery_codes {
@@ -260,3 +261,17 @@ model event_logs {
@@index([event])
}
model slash_command_presets {
id Int @id @default(autoincrement())
command String
prompt String
description String
uid Int @default(0) // 0 is null user
userId Int?
createdAt DateTime @default(now())
lastUpdatedAt DateTime @default(now())
user users? @relation(fields: [userId], references: [id], onDelete: Cascade)
@@unique([uid, command])
}

View File

@@ -18,6 +18,7 @@ class GenericOpenAiLLM {
});
this.model =
modelPreference ?? process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
this.maxTokens = Number(process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024); // coerce: env values are strings, max_tokens must be a number
if (!this.model)
throw new Error("GenericOpenAI must have a valid model set.");
this.limits = {
@@ -94,6 +95,7 @@
model: this.model,
messages,
temperature,
max_tokens: this.maxTokens,
})
.catch((e) => {
throw new Error(e.response.data.error.message);
@@ -110,6 +112,7 @@
stream: true,
messages,
temperature,
max_tokens: this.maxTokens,
});
return streamRequest;
}

View File

@@ -743,6 +743,26 @@ ${this.getHistory({ to: route.to })
return new Providers.LMStudioProvider({});
case "ollama":
return new Providers.OllamaProvider({ model: config.model });
case "groq":
return new Providers.GroqProvider({ model: config.model });
case "togetherai":
return new Providers.TogetherAIProvider({ model: config.model });
case "azure":
return new Providers.AzureOpenAiProvider({ model: config.model });
case "koboldcpp":
return new Providers.KoboldCPPProvider({});
case "localai":
return new Providers.LocalAIProvider({ model: config.model });
case "openrouter":
return new Providers.OpenRouterProvider({ model: config.model });
case "mistral":
return new Providers.MistralProvider({ model: config.model });
case "generic-openai":
return new Providers.GenericOpenAiProvider({ model: config.model });
case "perplexity":
return new Providers.PerplexityProvider({ model: config.model });
case "textgenwebui":
return new Providers.TextWebGenUiProvider({});
default:
throw new Error(

View File

@@ -58,6 +58,9 @@ class Provider {
}
}
// For some providers we may want to override the system prompt to be more verbose.
// Currently we only do this for LMStudio, but we will probably want to expand this
// to any Untooled LLM.
static systemPrompt(provider = null) {
switch (provider) {
case "lmstudio":

View File

@@ -0,0 +1,105 @@
const { OpenAIClient, AzureKeyCredential } = require("@azure/openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
/**
* The provider for the Azure OpenAI API.
*/
class AzureOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(_config = {}) {
super();
const client = new OpenAIClient(
process.env.AZURE_OPENAI_ENDPOINT,
new AzureKeyCredential(process.env.AZURE_OPENAI_KEY)
);
this._client = client;
this.model = process.env.OPEN_MODEL_PREF ?? "gpt-3.5-turbo";
this.verbose = true;
}
get client() {
return this._client;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client
.getChatCompletions(this.model, messages, {
temperature: 0,
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("Azure OpenAI chat: No results!");
if (result.choices.length === 0)
throw new Error("Azure OpenAI chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
return null;
});
}
/**
* Create a completion based on the received messages.
*
* @param messages A list of messages to send to the API.
* @param functions
* @returns The completion.
*/
async complete(messages, functions = null) {
try {
let completion;
if (functions.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.getChatCompletions(
this.model,
this.cleanMsgs(messages),
{
temperature: 0.7,
}
);
completion = response.choices[0].message;
}
return { result: completion.content, cost: 0 };
} catch (error) {
throw error;
}
}
/**
* Get the cost of the completion.
* Stubbed since Azure OpenAI has no public cost basis.
*
* @param _usage The completion to get the cost for.
* @returns The cost of the completion.
*/
getCost(_usage) {
return 0;
}
}
module.exports = AzureOpenAiProvider;

View File

@@ -0,0 +1,117 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
/**
 * The provider for the Generic OpenAI provider.
 * Since we cannot promise the generic provider even supports tool calling
 * (it almost certainly does not), we just wrap it in Untooled, which is
 * often far better anyway.
 */
class GenericOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(config = {}) {
super();
const { model = "gpt-3.5-turbo" } = config;
const client = new OpenAI({
baseURL: process.env.GENERIC_OPEN_AI_BASE_PATH,
apiKey: process.env.GENERIC_OPEN_AI_API_KEY ?? null,
maxRetries: 3,
});
this._client = client;
this.model = model;
this.verbose = true;
this.maxTokens = Number(process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024); // coerce: env values are strings
}
get client() {
return this._client;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client.chat.completions
.create({
model: this.model,
temperature: 0,
messages,
max_tokens: this.maxTokens,
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("Generic OpenAI chat: No results!");
if (result.choices.length === 0)
throw new Error("Generic OpenAI chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
return null;
});
}
/**
* Create a completion based on the received messages.
*
* @param messages A list of messages to send to the API.
* @param functions
* @returns The completion.
*/
async complete(messages, functions = null) {
try {
let completion;
if (functions.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
/**
* Get the cost of the completion.
*
* @param _usage The completion to get the cost for.
* @returns The cost of the completion.
*/
getCost(_usage) {
return 0;
}
}
module.exports = GenericOpenAiProvider;

View File

@@ -0,0 +1,113 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const { RetryError } = require("../error.js");
/**
 * The provider for the Groq provider.
 * Using OpenAI tool calling with Groq really sucks right now; it's just
 * fast and bad. We should probably migrate this to Untooled to improve
 * coherence.
 */
class GroqProvider extends Provider {
model;
constructor(config = {}) {
const { model = "llama3-8b-8192" } = config;
const client = new OpenAI({
baseURL: "https://api.groq.com/openai/v1",
apiKey: process.env.GROQ_API_KEY,
maxRetries: 3,
});
super(client);
this.model = model;
this.verbose = true;
}
/**
* Create a completion based on the received messages.
*
* @param messages A list of messages to send to the API.
* @param functions
* @returns The completion.
*/
async complete(messages, functions = null) {
try {
const response = await this.client.chat.completions.create({
model: this.model,
// stream: true,
messages,
...(Array.isArray(functions) && functions?.length > 0
? { functions }
: {}),
});
// Right now, we only support one completion,
// so we just take the first one in the list
const completion = response.choices[0].message;
const cost = this.getCost(response.usage);
// treat function calls
if (completion.function_call) {
let functionArgs = {};
try {
functionArgs = JSON.parse(completion.function_call.arguments);
} catch (error) {
// call the complete function again in case it gets a json error
return this.complete(
[
...messages,
{
role: "function",
name: completion.function_call.name,
function_call: completion.function_call,
content: error?.message,
},
],
functions
);
}
// console.log(completion, { functionArgs })
return {
result: null,
functionCall: {
name: completion.function_call.name,
arguments: functionArgs,
},
cost,
};
}
return {
result: completion.content,
cost,
};
} catch (error) {
// If invalid Auth error we need to abort because no amount of waiting
// will make auth better.
if (error instanceof OpenAI.AuthenticationError) throw error;
if (
error instanceof OpenAI.RateLimitError ||
error instanceof OpenAI.InternalServerError ||
error instanceof OpenAI.APIError // Also will catch AuthenticationError!!!
) {
throw new RetryError(error.message);
}
throw error;
}
}
/**
* Get the cost of the completion.
*
* @param _usage The completion to get the cost for.
* @returns The cost of the completion.
* Stubbed since Groq has no cost basis.
*/
getCost(_usage) {
return 0;
}
}
module.exports = GroqProvider;

View File

@@ -130,7 +130,6 @@ ${JSON.stringify(def.parameters.properties, null, 4)}\n`;
...history,
],
});
const call = safeJsonParse(response, null);
if (call === null) return { toolCall: null, text: response }; // failed to parse, so must be text.

View File

@@ -2,10 +2,30 @@ const OpenAIProvider = require("./openai.js");
const AnthropicProvider = require("./anthropic.js");
const LMStudioProvider = require("./lmstudio.js");
const OllamaProvider = require("./ollama.js");
const GroqProvider = require("./groq.js");
const TogetherAIProvider = require("./togetherai.js");
const AzureOpenAiProvider = require("./azure.js");
const KoboldCPPProvider = require("./koboldcpp.js");
const LocalAIProvider = require("./localai.js");
const OpenRouterProvider = require("./openrouter.js");
const MistralProvider = require("./mistral.js");
const GenericOpenAiProvider = require("./genericOpenAi.js");
const PerplexityProvider = require("./perplexity.js");
const TextWebGenUiProvider = require("./textgenwebui.js");
module.exports = {
OpenAIProvider,
AnthropicProvider,
LMStudioProvider,
OllamaProvider,
GroqProvider,
TogetherAIProvider,
AzureOpenAiProvider,
KoboldCPPProvider,
LocalAIProvider,
OpenRouterProvider,
MistralProvider,
GenericOpenAiProvider,
PerplexityProvider,
TextWebGenUiProvider,
};

View File

@@ -0,0 +1,113 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
/**
* The provider for the KoboldCPP provider.
*/
class KoboldCPPProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(_config = {}) {
super();
const model = process.env.KOBOLD_CPP_MODEL_PREF ?? null;
const client = new OpenAI({
baseURL: process.env.KOBOLD_CPP_BASE_PATH?.replace(/\/+$/, ""),
apiKey: null,
maxRetries: 3,
});
this._client = client;
this.model = model;
this.verbose = true;
}
get client() {
return this._client;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client.chat.completions
.create({
model: this.model,
temperature: 0,
messages,
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("KoboldCPP chat: No results!");
if (result.choices.length === 0)
throw new Error("KoboldCPP chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
return null;
});
}
/**
* Create a completion based on the received messages.
*
* @param messages A list of messages to send to the API.
* @param functions
* @returns The completion.
*/
async complete(messages, functions = null) {
try {
let completion;
if (functions.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
/**
* Get the cost of the completion.
*
* @param _usage The completion to get the cost for.
* @returns The cost of the completion.
* Stubbed since KoboldCPP has no cost basis.
*/
getCost(_usage) {
return 0;
}
}
module.exports = KoboldCPPProvider;

View File

@@ -16,8 +16,8 @@ class LMStudioProvider extends InheritMultiple([Provider, UnTooled]) {
baseURL: process.env.LMSTUDIO_BASE_PATH?.replace(/\/+$/, ""), // here is the URL to your LMStudio instance
apiKey: null,
maxRetries: 3,
-model,
});
this._client = client;
this.model = model;
this.verbose = true;

View File

@@ -0,0 +1,114 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
/**
* The provider for the LocalAI provider.
*/
class LocalAiProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(config = {}) {
const { model = null } = config;
super();
const client = new OpenAI({
baseURL: process.env.LOCAL_AI_BASE_PATH,
apiKey: process.env.LOCAL_AI_API_KEY ?? null,
maxRetries: 3,
});
this._client = client;
this.model = model;
this.verbose = true;
}
get client() {
return this._client;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client.chat.completions
.create({
model: this.model,
temperature: 0,
messages,
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("LocalAI chat: No results!");
if (result.choices.length === 0)
throw new Error("LocalAI chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
return null;
});
}
/**
* Create a completion based on the received messages.
*
* @param messages A list of messages to send to the API.
* @param functions
* @returns The completion.
*/
async complete(messages, functions = null) {
try {
let completion;
if (functions.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
return { result: completion.content, cost: 0 };
} catch (error) {
throw error;
}
}
/**
* Get the cost of the completion.
*
* @param _usage The completion to get the cost for.
* @returns The cost of the completion.
* Stubbed since LocalAI has no cost basis.
*/
getCost(_usage) {
return 0;
}
}
module.exports = LocalAiProvider;

View File

@ -0,0 +1,116 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
/**
* The agent provider for Mistral.
* Mistral limits which models can call tools, and even then the model names
* change and don't match the docs. When you do have the right model, it still
* fails and is not truly OpenAI-compatible, so it's easier to wrap this with
* UnTooled, which works reliably since it operates on plain text.
*/
class MistralProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(config = {}) {
super();
const { model = "mistral-medium" } = config;
const client = new OpenAI({
baseURL: "https://api.mistral.ai/v1",
apiKey: process.env.MISTRAL_API_KEY,
maxRetries: 3,
});
this._client = client;
this.model = model;
this.verbose = true;
}
get client() {
return this._client;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client.chat.completions
.create({
model: this.model,
temperature: 0,
messages,
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("LMStudio chat: No results!");
if (result.choices.length === 0)
throw new Error("LMStudio chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
return null;
});
}
/**
* Create a completion based on the received messages.
*
* @param messages A list of messages to send to the API.
* @param functions A list of function definitions the model may call.
* @returns The completion.
*/
async complete(messages, functions = null) {
try {
let completion;
if (functions?.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
/**
* Get the cost of the completion.
*
* @param _usage The completion to get the cost for.
* @returns The cost of the completion.
*/
getCost(_usage) {
return 0;
}
}
module.exports = MistralProvider;
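
The UnTooled mixin leaned on above sidesteps native tool calling by keeping everything in plain text: the available tools are described in the prompt, and a tool call is parsed back out of the model's ordinary completion. A rough sketch of that idea with invented prompt wording and parsing; UnTooled's actual implementation lives in helpers/untooled.js and may differ:

// Illustrative only: advertise the tools inside the system prompt...
function buildToolPrompt(functions) {
  return [
    "You may call one of these tools by replying with JSON of the form",
    '{"name": "<tool>", "arguments": {...}} and nothing else:',
    JSON.stringify(functions, null, 2),
  ].join("\n");
}

// ...then try to recover a structured tool call from the text reply.
function tryParseToolCall(text) {
  try {
    const maybe = JSON.parse(text);
    if (maybe?.name && maybe?.arguments) return maybe;
  } catch {}
  return null; // not a tool call, so treat the text as a normal answer
}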

View File

@ -0,0 +1,117 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
/**
* The agent provider for OpenRouter.
*/
class OpenRouterProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(config = {}) {
const { model = "openrouter/auto" } = config;
super();
const client = new OpenAI({
baseURL: "https://openrouter.ai/api/v1",
apiKey: process.env.OPENROUTER_API_KEY,
maxRetries: 3,
defaultHeaders: {
"HTTP-Referer": "https://useanything.com",
"X-Title": "AnythingLLM",
},
});
this._client = client;
this.model = model;
this.verbose = true;
}
get client() {
return this._client;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client.chat.completions
.create({
model: this.model,
temperature: 0,
messages,
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("OpenRouter chat: No results!");
if (result.choices.length === 0)
throw new Error("OpenRouter chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
return null;
});
}
/**
* Create a completion based on the received messages.
*
* @param messages A list of messages to send to the API.
* @param functions A list of function definitions the model may call.
* @returns The completion.
*/
async complete(messages, functions = null) {
try {
let completion;
if (functions?.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
/**
* Get the cost of the completion.
*
* @param _usage The completion to get the cost for.
* @returns The cost of the completion.
* Stubbed since OpenRouter has no cost basis.
*/
getCost(_usage) {
return 0;
}
}
module.exports = OpenRouterProvider;

View File

@ -0,0 +1,112 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
/**
* The agent provider for Perplexity.
*/
class PerplexityProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(config = {}) {
super();
const { model = "sonar-small-online" } = config;
const client = new OpenAI({
baseURL: "https://api.perplexity.ai",
apiKey: process.env.PERPLEXITY_API_KEY ?? null,
maxRetries: 3,
});
this._client = client;
this.model = model;
this.verbose = true;
}
get client() {
return this._client;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client.chat.completions
.create({
model: this.model,
temperature: 0,
messages,
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("Perplexity chat: No results!");
if (result.choices.length === 0)
throw new Error("Perplexity chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
return null;
});
}
/**
* Create a completion based on the received messages.
*
* @param messages A list of messages to send to the API.
* @param functions A list of function definitions the model may call.
* @returns The completion.
*/
async complete(messages, functions = null) {
try {
let completion;
if (functions?.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
/**
* Get the cost of the completion.
*
* @param _usage The completion to get the cost for.
* @returns The cost of the completion.
*/
getCost(_usage) {
return 0;
}
}
module.exports = PerplexityProvider;

View File

@ -0,0 +1,112 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
/**
* The agent provider for Oobabooga (text-generation-web-ui).
*/
class TextWebGenUiProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(_config = {}) {
super();
const client = new OpenAI({
baseURL: process.env.TEXT_GEN_WEB_UI_BASE_PATH,
apiKey: null,
maxRetries: 3,
});
this._client = client;
this.model = null; // text-web-gen-ui does not have a model pref.
this.verbose = true;
}
get client() {
return this._client;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client.chat.completions
.create({
model: this.model,
temperature: 0,
messages,
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("Oobabooga chat: No results!");
if (result.choices.length === 0)
throw new Error("Oobabooga chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
return null;
});
}
/**
* Create a completion based on the received messages.
*
* @param messages A list of messages to send to the API.
* @param functions A list of function definitions the model may call.
* @returns The completion.
*/
async complete(messages, functions = null) {
try {
let completion;
if (functions?.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
/**
* Get the cost of the completion.
*
* @param _usage The completion to get the cost for.
* @returns The cost of the completion.
* Stubbed since Oobabooga has no cost basis.
*/
getCost(_usage) {
return 0;
}
}
module.exports = TextWebGenUiProvider;

View File

@ -0,0 +1,113 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
/**
* The agent provider for TogetherAI.
*/
class TogetherAIProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(config = {}) {
const { model = "mistralai/Mistral-7B-Instruct-v0.1" } = config;
super();
const client = new OpenAI({
baseURL: "https://api.together.xyz/v1",
apiKey: process.env.TOGETHER_AI_API_KEY,
maxRetries: 3,
});
this._client = client;
this.model = model;
this.verbose = true;
}
get client() {
return this._client;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client.chat.completions
.create({
model: this.model,
temperature: 0,
messages,
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("LMStudio chat: No results!");
if (result.choices.length === 0)
throw new Error("LMStudio chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
return null;
});
}
/**
* Create a completion based on the received messages.
*
* @param messages A list of messages to send to the API.
* @param functions A list of function definitions the model may call.
* @returns The completion.
*/
async complete(messages, functions = null) {
try {
let completion;
if (functions?.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
/**
* Get the cost of the completion.
*
* @param _usage The completion to get the cost for.
* @returns The cost of the completion.
* Stubbed since TogetherAI cost is not tracked.
*/
getCost(_usage) {
return 0;
}
}
module.exports = TogetherAIProvider;

View File

@ -85,6 +85,59 @@ class AgentHandler {
if (!process.env.OLLAMA_BASE_PATH)
throw new Error("Ollama base path must be provided to use agents.");
break;
case "groq":
if (!process.env.GROQ_API_KEY)
throw new Error("Groq API key must be provided to use agents.");
break;
case "togetherai":
if (!process.env.TOGETHER_AI_API_KEY)
throw new Error("TogetherAI API key must be provided to use agents.");
break;
case "azure":
if (!process.env.AZURE_OPENAI_ENDPOINT || !process.env.AZURE_OPENAI_KEY)
throw new Error(
"Azure OpenAI API endpoint and key must be provided to use agents."
);
break;
case "koboldcpp":
if (!process.env.KOBOLD_CPP_BASE_PATH)
throw new Error(
"KoboldCPP must have a valid base path to use for the api."
);
break;
case "localai":
if (!process.env.LOCAL_AI_BASE_PATH)
throw new Error(
"LocalAI must have a valid base path to use for the api."
);
break;
case "gemini":
if (!process.env.GEMINI_API_KEY)
throw new Error("Gemini API key must be provided to use agents.");
break;
case "openrouter":
if (!process.env.OPENROUTER_API_KEY)
throw new Error("OpenRouter API key must be provided to use agents.");
break;
case "mistral":
if (!process.env.MISTRAL_API_KEY)
throw new Error("Mistral API key must be provided to use agents.");
break;
case "generic-openai":
if (!process.env.GENERIC_OPEN_AI_BASE_PATH)
throw new Error("API base path must be provided to use agents.");
break;
case "perplexity":
if (!process.env.PERPLEXITY_API_KEY)
throw new Error("Perplexity API key must be provided to use agents.");
break;
case "textgenwebui":
if (!process.env.TEXT_GEN_WEB_UI_BASE_PATH)
throw new Error(
"TextWebGenUI API base path must be provided to use agents."
);
break;
default:
throw new Error("No provider found to power agent cluster.");
}
@ -100,6 +153,28 @@ class AgentHandler {
return "server-default"; return "server-default";
case "ollama": case "ollama":
return "llama3:latest"; return "llama3:latest";
case "groq":
return "llama3-70b-8192";
case "togetherai":
return "mistralai/Mixtral-8x7B-Instruct-v0.1";
case "azure":
return "gpt-3.5-turbo";
case "koboldcpp":
return null;
case "gemini":
return "gemini-pro";
case "localai":
return null;
case "openrouter":
return "openrouter/auto";
case "mistral":
return "mistral-medium";
case "generic-openai":
return "gpt-3.5-turbo";
case "perplexity":
return "sonar-small-online";
case "textgenwebui":
return null;
default:
return "unknown";
}
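
The two switches above work as a pair: every agent provider needs both a credential check and a default-model fallback. A sketch of how a new entry would slot in, using a hypothetical provider id and env var that are not part of this commit:

// In the setup-validation switch: fail fast when credentials are missing.
case "exampleai": // hypothetical provider id
  if (!process.env.EXAMPLE_AI_API_KEY)
    throw new Error("ExampleAI API key must be provided to use agents.");
  break;

// In the default-model switch: return a sensible fallback, or null when the
// backend (like koboldcpp or textgenwebui) picks the model server-side.
case "exampleai":
  return "example-model-v1";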

View File

@ -4,14 +4,28 @@ const { resetMemory } = require("./commands/reset");
const { getVectorDbClass, getLLMProvider } = require("../helpers");
const { convertToPromptHistory } = require("../helpers/chat/responses");
const { DocumentManager } = require("../DocumentManager");
const { SlashCommandPresets } = require("../../models/slashCommandsPresets");
const VALID_COMMANDS = {
"/reset": resetMemory,
};
- function grepCommand(message) {
+ async function grepCommand(message, user = null) {
const userPresets = await SlashCommandPresets.getUserPresets(user?.id);
const availableCommands = Object.keys(VALID_COMMANDS);
// Check if the message starts with any preset command
const foundPreset = userPresets.find((p) => message.startsWith(p.command));
if (!!foundPreset) {
// Replace the preset command with the corresponding prompt
const updatedMessage = message.replace(
foundPreset.command,
foundPreset.prompt
);
return updatedMessage;
}
// Check if the message starts with any built-in command
for (let i = 0; i < availableCommands.length; i++) {
const cmd = availableCommands[i];
const re = new RegExp(`^(${cmd})`, "i");
@ -20,7 +34,7 @@ function grepCommand(message) {
}
}
- return null;
+ return message;
}
async function chatWithWorkspace(
@ -31,10 +45,10 @@ async function chatWithWorkspace(
thread = null
) {
const uuid = uuidv4();
- const command = grepCommand(message);
+ const updatedMessage = await grepCommand(message, user);
- if (!!command && Object.keys(VALID_COMMANDS).includes(command)) {
+ if (Object.keys(VALID_COMMANDS).includes(updatedMessage)) {
- return await VALID_COMMANDS[command](workspace, message, uuid, user);
+ return await VALID_COMMANDS[updatedMessage](workspace, message, uuid, user);
}
const LLMConnector = getLLMProvider({
@ -164,7 +178,7 @@ async function chatWithWorkspace(
const messages = await LLMConnector.compressMessages(
{
systemPrompt: chatPrompt(workspace),
- userPrompt: message,
+ userPrompt: updatedMessage,
contextTexts,
chatHistory,
},
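
The preset branch in grepCommand() above performs a plain string replacement of the matched command with its stored prompt. A quick illustration with a made-up preset record; the field names follow the code above, while the values are invented:

// Hypothetical record as returned by SlashCommandPresets.getUserPresets()
const preset = {
  command: "/tldr",
  prompt: "Summarize the following in three bullet points:",
};

const message = "/tldr the meeting notes from today";
const updatedMessage = message.replace(preset.command, preset.prompt);
// => "Summarize the following in three bullet points: the meeting notes from today"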

View File

@ -23,10 +23,10 @@ async function streamChatWithWorkspace(
thread = null
) {
const uuid = uuidv4();
- const command = grepCommand(message);
+ const updatedMessage = await grepCommand(message, user);
- if (!!command && Object.keys(VALID_COMMANDS).includes(command)) {
+ if (Object.keys(VALID_COMMANDS).includes(updatedMessage)) {
- const data = await VALID_COMMANDS[command](
+ const data = await VALID_COMMANDS[updatedMessage](
workspace,
message,
uuid,
@ -185,7 +185,7 @@ async function streamChatWithWorkspace(
const messages = await LLMConnector.compressMessages(
{
systemPrompt: chatPrompt(workspace),
- userPrompt: message,
+ userPrompt: updatedMessage,
contextTexts,
chatHistory,
},

View File

@ -178,7 +178,7 @@ async function getKoboldCPPModels(basePath = null) {
try {
const { OpenAI: OpenAIApi } = require("openai");
const openai = new OpenAIApi({
- baseURL: basePath || process.env.LMSTUDIO_BASE_PATH,
+ baseURL: basePath || process.env.KOBOLD_CPP_BASE_PATH,
apiKey: null,
});
const models = await openai.models

View File

@ -173,6 +173,10 @@ const KEY_MAPPING = {
envKey: "GENERIC_OPEN_AI_API_KEY", envKey: "GENERIC_OPEN_AI_API_KEY",
checks: [], checks: [],
}, },
GenericOpenAiMaxTokens: {
envKey: "GENERIC_OPEN_AI_MAX_TOKENS",
checks: [nonZero],
},
EmbeddingEngine: {
envKey: "EMBEDDING_ENGINE",