Enable per-workspace provider/model combination (#1042)

* Enable per-workspace provider/model combination

* cleanup

* remove resetWorkspaceChatModels and wipeWorkspaceModelPreference so a system provider change no longer resets each workspace's model preference

* add space

---------

Co-authored-by: shatfield4 <seanhatfield5@gmail.com>
Timothy Carambat 2024-04-05 10:58:36 -07:00 committed by GitHub
parent 1be6c57a44
commit 94b58249a3
27 changed files with 836 additions and 461 deletions
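
At a glance, the change threads a per-workspace provider/model pair through the whole stack: a new chatProvider column on workspaces, provider option components that can render in a credentials-only mode, and a reworked getLLMProvider helper that takes an options object. A minimal sketch of the resolution order the hunks below establish (the workspace object and require path are assumptions):

// Assumed require path; adjust to wherever the helper lives relative to the caller.
const { getLLMProvider } = require("./utils/helpers");

// The workspace override wins; otherwise the system-wide LLM_PROVIDER env
// value applies, with "openai" as the final fallback (see the helpers hunk below).
const LLMConnector = getLLMProvider({
  provider: workspace?.chatProvider, // e.g. "anthropic"; null means "use system"
  model: workspace?.chatModel, // e.g. "claude-3-opus-20240229"
});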

View File

@@ -1,26 +1,6 @@
import { Info } from "@phosphor-icons/react";
import paths from "@/utils/paths";
export default function AnthropicAiOptions({ settings, showAlert = false }) {
export default function AnthropicAiOptions({ settings }) {
return (
<div className="w-full flex flex-col">
{showAlert && (
<div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-6 bg-blue-800/30 w-fit rounded-lg px-4 py-2">
<div className="gap-x-2 flex items-center">
<Info size={12} className="hidden md:visible" />
<p className="text-sm md:text-base">
Anthropic as your LLM requires you to set an embedding service to
use.
</p>
</div>
<a
href={paths.settings.embeddingPreference()}
className="text-sm md:text-base my-2 underline"
>
Manage embedding &rarr;
</a>
</div>
)}
<div className="w-full flex items-center gap-4">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
@@ -38,32 +18,34 @@ export default function AnthropicAiOptions({ settings, showAlert = false }) {
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="AnthropicModelPref"
defaultValue={settings?.AnthropicModelPref || "claude-2"}
required={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{[
"claude-instant-1.2",
"claude-2.0",
"claude-2.1",
"claude-3-haiku-20240307",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
].map((model) => {
return (
<option key={model} value={model}>
{model}
</option>
);
})}
</select>
</div>
{!settings?.credentialsOnly && (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="AnthropicModelPref"
defaultValue={settings?.AnthropicModelPref || "claude-2"}
required={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{[
"claude-instant-1.2",
"claude-2.0",
"claude-2.1",
"claude-3-haiku-20240307",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
].map((model) => {
return (
<option key={model} value={model}>
{model}
</option>
);
})}
</select>
</div>
)}
</div>
</div>
);
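
The same !settings?.credentialsOnly guard is applied to every provider options component in this commit: rendered from the workspace setup modal with { credentialsOnly: true }, a component shows only its credential inputs; rendered from the settings page with real settings, it shows the model pickers too. A minimal sketch of that contract, with hypothetical ApiKeyInput and ModelSelection children:

// Hypothetical child components; the guard is the pattern being established.
function ExampleProviderOptions({ settings }) {
  return (
    <div className="w-full flex flex-col">
      <ApiKeyInput settings={settings} />
      {/* Model and token fields are hidden in credentials-only mode */}
      {!settings?.credentialsOnly && <ModelSelection settings={settings} />}
    </div>
  );
}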

View File

@@ -18,25 +18,27 @@ export default function GeminiLLMOptions({ settings }) {
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="GeminiLLMModelPref"
defaultValue={settings?.GeminiLLMModelPref || "gemini-pro"}
required={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{["gemini-pro"].map((model) => {
return (
<option key={model} value={model}>
{model}
</option>
);
})}
</select>
</div>
{!settings?.credentialsOnly && (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="GeminiLLMModelPref"
defaultValue={settings?.GeminiLLMModelPref || "gemini-pro"}
required={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{["gemini-pro"].map((model) => {
return (
<option key={model} value={model}>
{model}
</option>
);
})}
</select>
</div>
)}
</div>
</div>
);

View File

@@ -17,25 +17,27 @@ export default function GroqAiOptions({ settings }) {
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="GroqModelPref"
defaultValue={settings?.GroqModelPref || "llama2-70b-4096"}
required={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{["llama2-70b-4096", "mixtral-8x7b-32768"].map((model) => {
return (
<option key={model} value={model}>
{model}
</option>
);
})}
</select>
</div>
{!settings?.credentialsOnly && (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="GroqModelPref"
defaultValue={settings?.GroqModelPref || "llama2-70b-4096"}
required={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{["llama2-70b-4096", "mixtral-8x7b-32768"].map((model) => {
return (
<option key={model} value={model}>
{model}
</option>
);
})}
</select>
</div>
)}
</div>
);
}

View File

@@ -46,23 +46,27 @@ export default function LMStudioOptions({ settings, showAlert = false }) {
onBlur={() => setBasePath(basePathValue)}
/>
</div>
<LMStudioModelSelection settings={settings} basePath={basePath} />
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Token context window
</label>
<input
type="number"
name="LMStudioTokenLimit"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="4096"
min={1}
onScroll={(e) => e.target.blur()}
defaultValue={settings?.LMStudioTokenLimit}
required={true}
autoComplete="off"
/>
</div>
{!settings?.credentialsOnly && (
<>
<LMStudioModelSelection settings={settings} basePath={basePath} />
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Token context window
</label>
<input
type="number"
name="LMStudioTokenLimit"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="4096"
min={1}
onScroll={(e) => e.target.blur()}
defaultValue={settings?.LMStudioTokenLimit}
required={true}
autoComplete="off"
/>
</div>
</>
)}
</div>
</div>
);

View File

@@ -46,27 +46,31 @@ export default function LocalAiOptions({ settings, showAlert = false }) {
onBlur={() => setBasePath(basePathValue)}
/>
</div>
<LocalAIModelSelection
settings={settings}
basePath={basePath}
apiKey={apiKey}
/>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Token context window
</label>
<input
type="number"
name="LocalAiTokenLimit"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="4096"
min={1}
onScroll={(e) => e.target.blur()}
defaultValue={settings?.LocalAiTokenLimit}
required={true}
autoComplete="off"
/>
</div>
{!settings?.credentialsOnly && (
<>
<LocalAIModelSelection
settings={settings}
basePath={basePath}
apiKey={apiKey}
/>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Token context window
</label>
<input
type="number"
name="LocalAiTokenLimit"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="4096"
min={1}
onScroll={(e) => e.target.blur()}
defaultValue={settings?.LocalAiTokenLimit}
required={true}
autoComplete="off"
/>
</div>
</>
)}
</div>
<div className="w-full flex items-center gap-4">
<div className="flex flex-col w-60">

View File

@@ -24,7 +24,9 @@ export default function MistralOptions({ settings }) {
onBlur={() => setMistralKey(inputValue)}
/>
</div>
<MistralModelSelection settings={settings} apiKey={mistralKey} />
{!settings?.credentialsOnly && (
<MistralModelSelection settings={settings} apiKey={mistralKey} />
)}
</div>
);
}

View File

@@ -27,23 +27,27 @@ export default function OllamaLLMOptions({ settings }) {
onBlur={() => setBasePath(basePathValue)}
/>
</div>
<OllamaLLMModelSelection settings={settings} basePath={basePath} />
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Token context window
</label>
<input
type="number"
name="OllamaLLMTokenLimit"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="4096"
min={1}
onScroll={(e) => e.target.blur()}
defaultValue={settings?.OllamaLLMTokenLimit}
required={true}
autoComplete="off"
/>
</div>
{!settings?.credentialsOnly && (
<>
<OllamaLLMModelSelection settings={settings} basePath={basePath} />
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Token context window
</label>
<input
type="number"
name="OllamaLLMTokenLimit"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="4096"
min={1}
onScroll={(e) => e.target.blur()}
defaultValue={settings?.OllamaLLMTokenLimit}
required={true}
autoComplete="off"
/>
</div>
</>
)}
</div>
</div>
);

View File

@@ -24,7 +24,9 @@ export default function OpenAiOptions({ settings }) {
onBlur={() => setOpenAIKey(inputValue)}
/>
</div>
<OpenAIModelSelection settings={settings} apiKey={openAIKey} />
{!settings?.credentialsOnly && (
<OpenAIModelSelection settings={settings} apiKey={openAIKey} />
)}
</div>
);
}

View File

@@ -19,7 +19,9 @@ export default function OpenRouterOptions({ settings }) {
spellCheck={false}
/>
</div>
<OpenRouterModelSelection settings={settings} />
{!settings?.credentialsOnly && (
<OpenRouterModelSelection settings={settings} />
)}
</div>
);
}
@@ -84,7 +86,7 @@ function OpenRouterModelSelection({ settings }) {
<option
key={model.id}
value={model.id}
selected={settings.OpenRouterModelPref === model.id}
selected={settings?.OpenRouterModelPref === model.id}
>
{model.name}
</option>

View File

@@ -19,7 +19,9 @@ export default function PerplexityOptions({ settings }) {
spellCheck={false}
/>
</div>
<PerplexityModelSelection settings={settings} />
{!settings?.credentialsOnly && (
<PerplexityModelSelection settings={settings} />
)}
</div>
);
}

View File

@@ -19,7 +19,9 @@ export default function TogetherAiOptions({ settings }) {
spellCheck={false}
/>
</div>
<TogetherAiModelSelection settings={settings} />
{!settings?.credentialsOnly && (
<TogetherAiModelSelection settings={settings} />
)}
</div>
);
}
@@ -84,7 +86,7 @@ function TogetherAiModelSelection({ settings }) {
<option
key={model.id}
value={model.id}
selected={settings.OpenRouterModelPref === model.id}
selected={settings?.OpenRouterModelPref === model.id}
>
{model.name}
</option>

View File

@@ -2,7 +2,7 @@ import System from "@/models/system";
import { useEffect, useState } from "react";
// Providers which cannot use this feature for workspace<>model selection
export const DISABLED_PROVIDERS = ["azure", "lmstudio"];
export const DISABLED_PROVIDERS = ["azure", "lmstudio", "native"];
const PROVIDER_DEFAULT_MODELS = {
openai: [
"gpt-3.5-turbo",

View File

@@ -36,6 +36,130 @@ import GroqAiOptions from "@/components/LLMSelection/GroqAiOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
export const AVAILABLE_LLM_PROVIDERS = [
{
name: "OpenAI",
value: "openai",
logo: OpenAiLogo,
options: (settings) => <OpenAiOptions settings={settings} />,
description: "The standard option for most non-commercial use.",
requiredConfig: ["OpenAiKey"],
},
{
name: "Azure OpenAI",
value: "azure",
logo: AzureOpenAiLogo,
options: (settings) => <AzureAiOptions settings={settings} />,
description: "The enterprise option of OpenAI hosted on Azure services.",
requiredConfig: ["AzureOpenAiEndpoint"],
},
{
name: "Anthropic",
value: "anthropic",
logo: AnthropicLogo,
options: (settings) => <AnthropicAiOptions settings={settings} />,
description: "A friendly AI Assistant hosted by Anthropic.",
requiredConfig: ["AnthropicApiKey"],
},
{
name: "Gemini",
value: "gemini",
logo: GeminiLogo,
options: (settings) => <GeminiLLMOptions settings={settings} />,
description: "Google's largest and most capable AI model",
requiredConfig: ["GeminiLLMApiKey"],
},
{
name: "HuggingFace",
value: "huggingface",
logo: HuggingFaceLogo,
options: (settings) => <HuggingFaceOptions settings={settings} />,
description:
"Access 150,000+ open-source LLMs and the world's AI community",
requiredConfig: [
"HuggingFaceLLMEndpoint",
"HuggingFaceLLMAccessToken",
"HuggingFaceLLMTokenLimit",
],
},
{
name: "Ollama",
value: "ollama",
logo: OllamaLogo,
options: (settings) => <OllamaLLMOptions settings={settings} />,
description: "Run LLMs locally on your own machine.",
requiredConfig: ["OllamaLLMBasePath"],
},
{
name: "LM Studio",
value: "lmstudio",
logo: LMStudioLogo,
options: (settings) => <LMStudioOptions settings={settings} />,
description:
"Discover, download, and run thousands of cutting edge LLMs in a few clicks.",
requiredConfig: ["LMStudioBasePath"],
},
{
name: "Local AI",
value: "localai",
logo: LocalAiLogo,
options: (settings) => <LocalAiOptions settings={settings} />,
description: "Run LLMs locally on your own machine.",
requiredConfig: ["LocalAiApiKey", "LocalAiBasePath", "LocalAiTokenLimit"],
},
{
name: "Together AI",
value: "togetherai",
logo: TogetherAILogo,
options: (settings) => <TogetherAiOptions settings={settings} />,
description: "Run open source models from Together AI.",
requiredConfig: ["TogetherAiApiKey"],
},
{
name: "Mistral",
value: "mistral",
logo: MistralLogo,
options: (settings) => <MistralOptions settings={settings} />,
description: "Run open source models from Mistral AI.",
requiredConfig: ["MistralApiKey"],
},
{
name: "Perplexity AI",
value: "perplexity",
logo: PerplexityLogo,
options: (settings) => <PerplexityOptions settings={settings} />,
description:
"Run powerful and internet-connected models hosted by Perplexity AI.",
requiredConfig: ["PerplexityApiKey"],
},
{
name: "OpenRouter",
value: "openrouter",
logo: OpenRouterLogo,
options: (settings) => <OpenRouterOptions settings={settings} />,
description: "A unified interface for LLMs.",
requiredConfig: ["OpenRouterApiKey"],
},
{
name: "Groq",
value: "groq",
logo: GroqLogo,
options: (settings) => <GroqAiOptions settings={settings} />,
description:
"The fastest LLM inferencing available for real-time AI applications.",
requiredConfig: ["GroqApiKey"],
},
{
name: "Native",
value: "native",
logo: AnythingLLMIcon,
options: (settings) => <NativeLLMOptions settings={settings} />,
description:
"Use a downloaded custom Llama model for chatting on this AnythingLLM instance.",
requiredConfig: [],
},
];
export default function GeneralLLMPreference() {
const [saving, setSaving] = useState(false);
const [hasChanges, setHasChanges] = useState(false);
@@ -94,120 +218,15 @@ export default function GeneralLLMPreference() {
}, []);
useEffect(() => {
const filtered = LLMS.filter((llm) =>
const filtered = AVAILABLE_LLM_PROVIDERS.filter((llm) =>
llm.name.toLowerCase().includes(searchQuery.toLowerCase())
);
setFilteredLLMs(filtered);
}, [searchQuery, selectedLLM]);
const LLMS = [
{
name: "OpenAI",
value: "openai",
logo: OpenAiLogo,
options: <OpenAiOptions settings={settings} />,
description: "The standard option for most non-commercial use.",
},
{
name: "Azure OpenAI",
value: "azure",
logo: AzureOpenAiLogo,
options: <AzureAiOptions settings={settings} />,
description: "The enterprise option of OpenAI hosted on Azure services.",
},
{
name: "Anthropic",
value: "anthropic",
logo: AnthropicLogo,
options: <AnthropicAiOptions settings={settings} />,
description: "A friendly AI Assistant hosted by Anthropic.",
},
{
name: "Gemini",
value: "gemini",
logo: GeminiLogo,
options: <GeminiLLMOptions settings={settings} />,
description: "Google's largest and most capable AI model",
},
{
name: "HuggingFace",
value: "huggingface",
logo: HuggingFaceLogo,
options: <HuggingFaceOptions settings={settings} />,
description:
"Access 150,000+ open-source LLMs and the world's AI community",
},
{
name: "Ollama",
value: "ollama",
logo: OllamaLogo,
options: <OllamaLLMOptions settings={settings} />,
description: "Run LLMs locally on your own machine.",
},
{
name: "LM Studio",
value: "lmstudio",
logo: LMStudioLogo,
options: <LMStudioOptions settings={settings} />,
description:
"Discover, download, and run thousands of cutting edge LLMs in a few clicks.",
},
{
name: "Local AI",
value: "localai",
logo: LocalAiLogo,
options: <LocalAiOptions settings={settings} />,
description: "Run LLMs locally on your own machine.",
},
{
name: "Together AI",
value: "togetherai",
logo: TogetherAILogo,
options: <TogetherAiOptions settings={settings} />,
description: "Run open source models from Together AI.",
},
{
name: "Mistral",
value: "mistral",
logo: MistralLogo,
options: <MistralOptions settings={settings} />,
description: "Run open source models from Mistral AI.",
},
{
name: "Perplexity AI",
value: "perplexity",
logo: PerplexityLogo,
options: <PerplexityOptions settings={settings} />,
description:
"Run powerful and internet-connected models hosted by Perplexity AI.",
},
{
name: "OpenRouter",
value: "openrouter",
logo: OpenRouterLogo,
options: <OpenRouterOptions settings={settings} />,
description: "A unified interface for LLMs.",
},
{
name: "Groq",
value: "groq",
logo: GroqLogo,
options: <GroqAiOptions settings={settings} />,
description:
"The fastest LLM inferencing available for real-time AI applications.",
},
{
name: "Native",
value: "native",
logo: AnythingLLMIcon,
options: <NativeLLMOptions settings={settings} />,
description:
"Use a downloaded custom Llama model for chatting on this AnythingLLM instance.",
},
];
const selectedLLMObject = LLMS.find((llm) => llm.value === selectedLLM);
const selectedLLMObject = AVAILABLE_LLM_PROVIDERS.find(
(llm) => llm.value === selectedLLM
);
return (
<div className="w-screen h-screen overflow-hidden bg-sidebar flex">
<Sidebar />
@@ -339,7 +358,9 @@ export default function GeneralLLMPreference() {
className="mt-4 flex flex-col gap-y-1"
>
{selectedLLM &&
LLMS.find((llm) => llm.value === selectedLLM)?.options}
AVAILABLE_LLM_PROVIDERS.find(
(llm) => llm.value === selectedLLM
)?.options?.(settings)}
</div>
</div>
</form>
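
Turning options into a function of settings, rather than an element pre-rendered with component state, is what lets one registry serve both call sites. A short usage sketch, assuming a loaded settings object:

const entry = AVAILABLE_LLM_PROVIDERS.find((llm) => llm.value === "openai");

// Settings page: render with the full system settings.
const fullOptions = entry.options(settings);

// Workspace setup modal: render only the credential inputs.
const credentialOptions = entry.options({ credentialsOnly: true });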

View File

@@ -3,21 +3,20 @@ import useGetProviderModels, {
} from "@/hooks/useGetProvidersModels";
export default function ChatModelSelection({
settings,
provider,
workspace,
setHasChanges,
}) {
const { defaultModels, customModels, loading } = useGetProviderModels(
settings?.LLMProvider
);
if (DISABLED_PROVIDERS.includes(settings?.LLMProvider)) return null;
const { defaultModels, customModels, loading } =
useGetProviderModels(provider);
if (DISABLED_PROVIDERS.includes(provider)) return null;
if (loading) {
return (
<div>
<div className="flex flex-col">
<label htmlFor="name" className="block input-label">
Chat model
Workspace Chat model
</label>
<p className="text-white text-opacity-60 text-xs font-medium py-1.5">
The specific chat model that will be used for this workspace. If
@@ -42,8 +41,7 @@ export default function ChatModelSelection({
<div>
<div className="flex flex-col">
<label htmlFor="name" className="block input-label">
Chat model{" "}
<span className="font-normal">({settings?.LLMProvider})</span>
Workspace Chat model
</label>
<p className="text-white text-opacity-60 text-xs font-medium py-1.5">
The specific chat model that will be used for this workspace. If
@@ -59,9 +57,6 @@
}}
className="bg-zinc-900 text-white text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5"
>
<option disabled={true} selected={workspace?.chatModel === null}>
System default
</option>
{defaultModels.length > 0 && (
<optgroup label="General models">
{defaultModels.map((model) => {

View File

@@ -0,0 +1,151 @@
// This component differs from the main LLMItem in that it shows if a provider is
// "ready for use" and if not - will then highjack the click handler to show a modal
// of the provider options that must be saved to continue.
import { createPortal } from "react-dom";
import ModalWrapper from "@/components/ModalWrapper";
import { useModal } from "@/hooks/useModal";
import { X } from "@phosphor-icons/react";
import System from "@/models/system";
import showToast from "@/utils/toast";
export default function WorkspaceLLM({
llm,
availableLLMs,
settings,
checked,
onClick,
}) {
const { isOpen, openModal, closeModal } = useModal();
const { name, value, logo, description } = llm;
function handleProviderSelection() {
// Determine if provider needs additional setup because its minimum required keys are
// not yet set in settings.
const requiresAdditionalSetup = (llm.requiredConfig || []).some(
(key) => !settings[key]
);
if (requiresAdditionalSetup) {
openModal();
return;
}
onClick(value);
}
return (
<>
<div
onClick={handleProviderSelection}
className={`w-full p-2 rounded-md hover:cursor-pointer hover:bg-white/10 ${
checked ? "bg-white/10" : ""
}`}
>
<input
type="checkbox"
value={value}
className="peer hidden"
checked={checked}
readOnly={true}
formNoValidate={true}
/>
<div className="flex gap-x-4 items-center">
<img
src={logo}
alt={`${name} logo`}
className="w-10 h-10 rounded-md"
/>
<div className="flex flex-col">
<div className="text-sm font-semibold text-white">{name}</div>
<div className="mt-1 text-xs text-[#D2D5DB]">{description}</div>
</div>
</div>
</div>
<SetupProvider
availableLLMs={availableLLMs}
isOpen={isOpen}
provider={value}
closeModal={closeModal}
postSubmit={onClick}
/>
</>
);
}
function SetupProvider({
availableLLMs,
isOpen,
provider,
closeModal,
postSubmit,
}) {
if (!isOpen) return null;
const LLMOption = availableLLMs.find((llm) => llm.value === provider);
if (!LLMOption) return null;
async function handleUpdate(e) {
e.preventDefault();
e.stopPropagation();
const data = {};
const form = new FormData(e.target);
for (var [key, value] of form.entries()) data[key] = value;
const { error } = await System.updateSystem(data);
if (error) {
showToast(`Failed to save ${LLMOption.name} settings: ${error}`, "error");
return;
}
closeModal();
postSubmit();
return false;
}
// Nested forms are invalid HTML and cause all sorts of issues, so we portal
// this form out to a container that sits outside the parent form.
return createPortal(
<ModalWrapper isOpen={isOpen}>
<div className="relative w-fit max-w-1/2 max-h-full">
<div className="relative bg-main-gradient rounded-xl shadow-[0_4px_14px_rgba(0,0,0,0.25)]">
<div className="flex items-start justify-between p-4 border-b rounded-t border-gray-500/50">
<h3 className="text-xl font-semibold text-white">
Setup {LLMOption.name}
</h3>
<button
onClick={closeModal}
type="button"
className="transition-all duration-300 text-gray-400 bg-transparent hover:border-white/60 rounded-lg text-sm p-1.5 ml-auto inline-flex items-center bg-sidebar-button hover:bg-menu-item-selected-gradient hover:border-slate-100 hover:border-opacity-50 border-transparent border"
data-modal-hide="staticModal"
>
<X className="text-gray-300 text-lg" />
</button>
</div>
<form id="provider-form" onSubmit={handleUpdate}>
<div className="py-[17px] px-[20px] flex flex-col gap-y-6">
<p className="text-sm text-white">
To use {LLMOption.name} as this workspace's LLM you need to set
it up first.
</p>
<div>{LLMOption.options({ credentialsOnly: true })}</div>
</div>
<div className="flex w-full justify-between items-center p-3 space-x-2 border-t rounded-b border-gray-500/50">
<button
type="button"
onClick={closeModal}
className="text-xs px-2 py-1 font-semibold rounded-lg bg-white hover:bg-transparent border-2 border-transparent hover:border-white hover:text-white h-[32px] w-fit -mr-8 whitespace-nowrap shadow-[0_4px_14px_rgba(0,0,0,0.25)]"
>
Cancel
</button>
<button
type="submit"
form="provider-form"
className="text-xs px-2 py-1 font-semibold rounded-lg bg-[#46C8FF] hover:bg-[#2C2F36] border-2 border-transparent hover:border-[#46C8FF] hover:text-white h-[32px] w-fit -mr-8 whitespace-nowrap shadow-[0_4px_14px_rgba(0,0,0,0.25)]"
>
Save {LLMOption.name} settings
</button>
</div>
</form>
</div>
</div>
</ModalWrapper>,
document.getElementById("workspace-chat-settings-container")
);
}
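
The portal is load-bearing here: nested form elements are invalid HTML, so the provider setup form mounts into a sibling container (workspace-chat-settings-container, which ChatSettings renders later in this diff) instead of inside the workspace settings form. Distilled to a minimal sketch:

import { createPortal } from "react-dom";

// The inner form mounts outside the outer form's DOM subtree, so the two
// forms never submit or validate through each other.
function EscapedForm({ onSubmit, children }) {
  return createPortal(
    <form id="provider-form" onSubmit={onSubmit}>
      {children}
    </form>,
    document.getElementById("workspace-chat-settings-container")
  );
}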

View File

@@ -0,0 +1,159 @@
import React, { useEffect, useRef, useState } from "react";
import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
import WorkspaceLLMItem from "./WorkspaceLLMItem";
import { AVAILABLE_LLM_PROVIDERS } from "@/pages/GeneralSettings/LLMPreference";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
import ChatModelSelection from "../ChatModelSelection";
const DISABLED_PROVIDERS = ["azure", "lmstudio", "native"];
const LLM_DEFAULT = {
name: "System default",
value: "default",
logo: AnythingLLMIcon,
options: () => <React.Fragment />,
description: "Use the system LLM preference for this workspace.",
requiredConfig: [],
};
export default function WorkspaceLLMSelection({
settings,
workspace,
setHasChanges,
}) {
const [filteredLLMs, setFilteredLLMs] = useState([]);
const [selectedLLM, setSelectedLLM] = useState(
workspace?.chatProvider ?? "default"
);
const [searchQuery, setSearchQuery] = useState("");
const [searchMenuOpen, setSearchMenuOpen] = useState(false);
const searchInputRef = useRef(null);
const LLMS = [LLM_DEFAULT, ...AVAILABLE_LLM_PROVIDERS].filter(
(llm) => !DISABLED_PROVIDERS.includes(llm.value)
);
function updateLLMChoice(selection) {
console.log({ selection });
setSearchQuery("");
setSelectedLLM(selection);
setSearchMenuOpen(false);
setHasChanges(true);
}
function handleXButton() {
if (searchQuery.length > 0) {
setSearchQuery("");
if (searchInputRef.current) searchInputRef.current.value = "";
} else {
setSearchMenuOpen(!searchMenuOpen);
}
}
useEffect(() => {
const filtered = LLMS.filter((llm) =>
llm.name.toLowerCase().includes(searchQuery.toLowerCase())
);
setFilteredLLMs(filtered);
}, [LLMS, searchQuery, selectedLLM]);
const selectedLLMObject = LLMS.find((llm) => llm.value === selectedLLM);
return (
<div className="border-b border-white/40 pb-8">
<div className="flex flex-col">
<label htmlFor="name" className="block input-label">
Workspace LLM Provider
</label>
<p className="text-white text-opacity-60 text-xs font-medium py-1.5">
The specific LLM provider & model that will be used for this
workspace. By default, it uses the system LLM provider and settings.
</p>
</div>
<div className="relative">
<input type="hidden" name="chatProvider" value={selectedLLM} />
{searchMenuOpen && (
<div
className="fixed top-0 left-0 w-full h-full bg-black bg-opacity-70 backdrop-blur-sm z-10"
onClick={() => setSearchMenuOpen(false)}
/>
)}
{searchMenuOpen ? (
<div className="absolute top-0 left-0 w-full max-w-[640px] max-h-[310px] overflow-auto white-scrollbar min-h-[64px] bg-[#18181B] rounded-lg flex flex-col justify-between cursor-pointer border-2 border-[#46C8FF] z-20">
<div className="w-full flex flex-col gap-y-1">
<div className="flex items-center sticky top-0 border-b border-[#9CA3AF] mx-4 bg-[#18181B]">
<MagnifyingGlass
size={20}
weight="bold"
className="absolute left-4 z-30 text-white -ml-4 my-2"
/>
<input
type="text"
name="llm-search"
autoComplete="off"
placeholder="Search all LLM providers"
className="-ml-4 my-2 bg-transparent z-20 pl-12 h-[38px] w-full px-4 py-1 text-sm outline-none focus:border-white text-white placeholder:text-white placeholder:font-medium"
onChange={(e) => setSearchQuery(e.target.value)}
ref={searchInputRef}
onKeyDown={(e) => {
if (e.key === "Enter") e.preventDefault();
}}
/>
<X
size={20}
weight="bold"
className="cursor-pointer text-white hover:text-[#9CA3AF]"
onClick={handleXButton}
/>
</div>
<div className="flex-1 pl-4 pr-2 flex flex-col gap-y-1 overflow-y-auto white-scrollbar pb-4">
{filteredLLMs.map((llm) => {
return (
<WorkspaceLLMItem
llm={llm}
key={llm.name}
availableLLMs={LLMS}
settings={settings}
checked={selectedLLM === llm.value}
onClick={() => updateLLMChoice(llm.value)}
/>
);
})}
</div>
</div>
</div>
) : (
<button
className="w-full max-w-[640px] h-[64px] bg-[#18181B] rounded-lg flex items-center p-[14px] justify-between cursor-pointer border-2 border-transparent hover:border-[#46C8FF] transition-all duration-300"
type="button"
onClick={() => setSearchMenuOpen(true)}
>
<div className="flex gap-x-4 items-center">
<img
src={selectedLLMObject.logo}
alt={`${selectedLLMObject.name} logo`}
className="w-10 h-10 rounded-md"
/>
<div className="flex flex-col text-left">
<div className="text-sm font-semibold text-white">
{selectedLLMObject.name}
</div>
<div className="mt-1 text-xs text-[#D2D5DB]">
{selectedLLMObject.description}
</div>
</div>
</div>
<CaretUpDown size={24} weight="bold" className="text-white" />
</button>
)}
</div>
{selectedLLM !== "default" && (
<div className="mt-4 flex flex-col gap-y-1">
<ChatModelSelection
provider={selectedLLM}
workspace={workspace}
setHasChanges={setHasChanges}
/>
</div>
)}
</div>
);
}
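
Note how the selection reaches the server: the picker is a custom dropdown, so the chosen value is mirrored into the hidden chatProvider input and serialized by the same FormData loop the settings forms already use. A sketch of what arrives on submit:

// Mirrors the FormData loop used by handleUpdate elsewhere in this diff.
const data = {};
const form = new FormData(document.getElementById("chat-settings-form"));
for (const [key, value] of form.entries()) data[key] = value;

// data.chatProvider is "default" or a provider value such as "ollama";
// the Workspace model (below) converts the "default" sentinel to null columns.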

View File

@@ -3,11 +3,11 @@ import Workspace from "@/models/workspace";
import showToast from "@/utils/toast";
import { castToType } from "@/utils/types";
import { useEffect, useRef, useState } from "react";
import ChatModelSelection from "./ChatModelSelection";
import ChatHistorySettings from "./ChatHistorySettings";
import ChatPromptSettings from "./ChatPromptSettings";
import ChatTemperatureSettings from "./ChatTemperatureSettings";
import ChatModeSelection from "./ChatModeSelection";
import WorkspaceLLMSelection from "./WorkspaceLLMSelection";
export default function ChatSettings({ workspace }) {
const [settings, setSettings] = useState({});
@@ -44,35 +44,45 @@ export default function ChatSettings({ workspace }) {
if (!workspace) return null;
return (
<form
ref={formEl}
onSubmit={handleUpdate}
className="w-1/2 flex flex-col gap-y-6"
>
<ChatModeSelection workspace={workspace} setHasChanges={setHasChanges} />
<ChatModelSelection
settings={settings}
workspace={workspace}
setHasChanges={setHasChanges}
/>
<ChatHistorySettings
workspace={workspace}
setHasChanges={setHasChanges}
/>
<ChatPromptSettings workspace={workspace} setHasChanges={setHasChanges} />
<ChatTemperatureSettings
settings={settings}
workspace={workspace}
setHasChanges={setHasChanges}
/>
{hasChanges && (
<button
type="submit"
className="w-fit transition-all duration-300 border border-slate-200 px-5 py-2.5 rounded-lg text-white text-sm items-center flex gap-x-2 hover:bg-slate-200 hover:text-slate-800 focus:ring-gray-800"
>
{saving ? "Updating..." : "Update workspace"}
</button>
)}
</form>
<div id="workspace-chat-settings-container">
<form
ref={formEl}
onSubmit={handleUpdate}
id="chat-settings-form"
className="w-1/2 flex flex-col gap-y-6"
>
<WorkspaceLLMSelection
settings={settings}
workspace={workspace}
setHasChanges={setHasChanges}
/>
<ChatModeSelection
workspace={workspace}
setHasChanges={setHasChanges}
/>
<ChatHistorySettings
workspace={workspace}
setHasChanges={setHasChanges}
/>
<ChatPromptSettings
workspace={workspace}
setHasChanges={setHasChanges}
/>
<ChatTemperatureSettings
settings={settings}
workspace={workspace}
setHasChanges={setHasChanges}
/>
{hasChanges && (
<button
type="submit"
form="chat-settings-form"
className="w-fit transition-all duration-300 border border-slate-200 px-5 py-2.5 rounded-lg text-white text-sm items-center flex gap-x-2 hover:bg-slate-200 hover:text-slate-800 focus:ring-gray-800"
>
{saving ? "Updating..." : "Update workspace"}
</button>
)}
</form>
</div>
);
}

View File

@@ -508,7 +508,7 @@ function workspaceEndpoints(app) {
if (fs.existsSync(oldPfpPath)) fs.unlinkSync(oldPfpPath);
}
const { workspace, message } = await Workspace.update(
const { workspace, message } = await Workspace._update(
workspaceRecord.id,
{
pfpFilename: uploadedFileName,
@@ -547,7 +547,7 @@ function workspaceEndpoints(app) {
if (fs.existsSync(oldPfpPath)) fs.unlinkSync(oldPfpPath);
}
const { workspace, message } = await Workspace.update(
const { workspace, message } = await Workspace._update(
workspaceRecord.id,
{
pfpFilename: null,

View File

@@ -57,103 +57,13 @@ const SystemSettings = {
// VectorDB Provider Selection Settings & Configs
// --------------------------------------------------------
VectorDB: vectorDB,
// Pinecone DB Keys
PineConeKey: !!process.env.PINECONE_API_KEY,
PineConeIndex: process.env.PINECONE_INDEX,
// Chroma DB Keys
ChromaEndpoint: process.env.CHROMA_ENDPOINT,
ChromaApiHeader: process.env.CHROMA_API_HEADER,
ChromaApiKey: !!process.env.CHROMA_API_KEY,
// Weaviate DB Keys
WeaviateEndpoint: process.env.WEAVIATE_ENDPOINT,
WeaviateApiKey: process.env.WEAVIATE_API_KEY,
// QDrant DB Keys
QdrantEndpoint: process.env.QDRANT_ENDPOINT,
QdrantApiKey: process.env.QDRANT_API_KEY,
// Milvus DB Keys
MilvusAddress: process.env.MILVUS_ADDRESS,
MilvusUsername: process.env.MILVUS_USERNAME,
MilvusPassword: !!process.env.MILVUS_PASSWORD,
// Zilliz DB Keys
ZillizEndpoint: process.env.ZILLIZ_ENDPOINT,
ZillizApiToken: process.env.ZILLIZ_API_TOKEN,
// AstraDB Keys
AstraDBApplicationToken: process?.env?.ASTRA_DB_APPLICATION_TOKEN,
AstraDBEndpoint: process?.env?.ASTRA_DB_ENDPOINT,
...this.vectorDBPreferenceKeys(),
// --------------------------------------------------------
// LLM Provider Selection Settings & Configs
// --------------------------------------------------------
LLMProvider: llmProvider,
// OpenAI Keys
OpenAiKey: !!process.env.OPEN_AI_KEY,
OpenAiModelPref: process.env.OPEN_MODEL_PREF || "gpt-3.5-turbo",
// Azure + OpenAI Keys
AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
AzureOpenAiModelPref: process.env.OPEN_MODEL_PREF,
AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
AzureOpenAiTokenLimit: process.env.AZURE_OPENAI_TOKEN_LIMIT || 4096,
// Anthropic Keys
AnthropicApiKey: !!process.env.ANTHROPIC_API_KEY,
AnthropicModelPref: process.env.ANTHROPIC_MODEL_PREF || "claude-2",
// Gemini Keys
GeminiLLMApiKey: !!process.env.GEMINI_API_KEY,
GeminiLLMModelPref: process.env.GEMINI_LLM_MODEL_PREF || "gemini-pro",
// LMStudio Keys
LMStudioBasePath: process.env.LMSTUDIO_BASE_PATH,
LMStudioTokenLimit: process.env.LMSTUDIO_MODEL_TOKEN_LIMIT,
LMStudioModelPref: process.env.LMSTUDIO_MODEL_PREF,
// LocalAI Keys
LocalAiApiKey: !!process.env.LOCAL_AI_API_KEY,
LocalAiBasePath: process.env.LOCAL_AI_BASE_PATH,
LocalAiModelPref: process.env.LOCAL_AI_MODEL_PREF,
LocalAiTokenLimit: process.env.LOCAL_AI_MODEL_TOKEN_LIMIT,
// Ollama LLM Keys
OllamaLLMBasePath: process.env.OLLAMA_BASE_PATH,
OllamaLLMModelPref: process.env.OLLAMA_MODEL_PREF,
OllamaLLMTokenLimit: process.env.OLLAMA_MODEL_TOKEN_LIMIT,
// TogetherAI Keys
TogetherAiApiKey: !!process.env.TOGETHER_AI_API_KEY,
TogetherAiModelPref: process.env.TOGETHER_AI_MODEL_PREF,
// Perplexity AI Keys
PerplexityApiKey: !!process.env.PERPLEXITY_API_KEY,
PerplexityModelPref: process.env.PERPLEXITY_MODEL_PREF,
// OpenRouter Keys
OpenRouterApiKey: !!process.env.OPENROUTER_API_KEY,
OpenRouterModelPref: process.env.OPENROUTER_MODEL_PREF,
// Mistral AI (API) Keys
MistralApiKey: !!process.env.MISTRAL_API_KEY,
MistralModelPref: process.env.MISTRAL_MODEL_PREF,
// Groq AI API Keys
GroqApiKey: !!process.env.GROQ_API_KEY,
GroqModelPref: process.env.GROQ_MODEL_PREF,
// Native LLM Keys
NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF,
NativeLLMTokenLimit: process.env.NATIVE_LLM_MODEL_TOKEN_LIMIT,
// HuggingFace Dedicated Inference
HuggingFaceLLMEndpoint: process.env.HUGGING_FACE_LLM_ENDPOINT,
HuggingFaceLLMAccessToken: !!process.env.HUGGING_FACE_LLM_API_KEY,
HuggingFaceLLMTokenLimit: process.env.HUGGING_FACE_LLM_TOKEN_LIMIT,
...this.llmPreferenceKeys(),
// --------------------------------------------------------
// Whisper (Audio transcription) Selection Settings & Configs
@@ -273,6 +183,108 @@
return false;
}
},
vectorDBPreferenceKeys: function () {
return {
// Pinecone DB Keys
PineConeKey: !!process.env.PINECONE_API_KEY,
PineConeIndex: process.env.PINECONE_INDEX,
// Chroma DB Keys
ChromaEndpoint: process.env.CHROMA_ENDPOINT,
ChromaApiHeader: process.env.CHROMA_API_HEADER,
ChromaApiKey: !!process.env.CHROMA_API_KEY,
// Weaviate DB Keys
WeaviateEndpoint: process.env.WEAVIATE_ENDPOINT,
WeaviateApiKey: process.env.WEAVIATE_API_KEY,
// QDrant DB Keys
QdrantEndpoint: process.env.QDRANT_ENDPOINT,
QdrantApiKey: process.env.QDRANT_API_KEY,
// Milvus DB Keys
MilvusAddress: process.env.MILVUS_ADDRESS,
MilvusUsername: process.env.MILVUS_USERNAME,
MilvusPassword: !!process.env.MILVUS_PASSWORD,
// Zilliz DB Keys
ZillizEndpoint: process.env.ZILLIZ_ENDPOINT,
ZillizApiToken: process.env.ZILLIZ_API_TOKEN,
// AstraDB Keys
AstraDBApplicationToken: process?.env?.ASTRA_DB_APPLICATION_TOKEN,
AstraDBEndpoint: process?.env?.ASTRA_DB_ENDPOINT,
};
},
llmPreferenceKeys: function () {
return {
// OpenAI Keys
OpenAiKey: !!process.env.OPEN_AI_KEY,
OpenAiModelPref: process.env.OPEN_MODEL_PREF || "gpt-3.5-turbo",
// Azure + OpenAI Keys
AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
AzureOpenAiModelPref: process.env.OPEN_MODEL_PREF,
AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
AzureOpenAiTokenLimit: process.env.AZURE_OPENAI_TOKEN_LIMIT || 4096,
// Anthropic Keys
AnthropicApiKey: !!process.env.ANTHROPIC_API_KEY,
AnthropicModelPref: process.env.ANTHROPIC_MODEL_PREF || "claude-2",
// Gemini Keys
GeminiLLMApiKey: !!process.env.GEMINI_API_KEY,
GeminiLLMModelPref: process.env.GEMINI_LLM_MODEL_PREF || "gemini-pro",
// LMStudio Keys
LMStudioBasePath: process.env.LMSTUDIO_BASE_PATH,
LMStudioTokenLimit: process.env.LMSTUDIO_MODEL_TOKEN_LIMIT,
LMStudioModelPref: process.env.LMSTUDIO_MODEL_PREF,
// LocalAI Keys
LocalAiApiKey: !!process.env.LOCAL_AI_API_KEY,
LocalAiBasePath: process.env.LOCAL_AI_BASE_PATH,
LocalAiModelPref: process.env.LOCAL_AI_MODEL_PREF,
LocalAiTokenLimit: process.env.LOCAL_AI_MODEL_TOKEN_LIMIT,
// Ollama LLM Keys
OllamaLLMBasePath: process.env.OLLAMA_BASE_PATH,
OllamaLLMModelPref: process.env.OLLAMA_MODEL_PREF,
OllamaLLMTokenLimit: process.env.OLLAMA_MODEL_TOKEN_LIMIT,
// TogetherAI Keys
TogetherAiApiKey: !!process.env.TOGETHER_AI_API_KEY,
TogetherAiModelPref: process.env.TOGETHER_AI_MODEL_PREF,
// Perplexity AI Keys
PerplexityApiKey: !!process.env.PERPLEXITY_API_KEY,
PerplexityModelPref: process.env.PERPLEXITY_MODEL_PREF,
// OpenRouter Keys
OpenRouterApiKey: !!process.env.OPENROUTER_API_KEY,
OpenRouterModelPref: process.env.OPENROUTER_MODEL_PREF,
// Mistral AI (API) Keys
MistralApiKey: !!process.env.MISTRAL_API_KEY,
MistralModelPref: process.env.MISTRAL_MODEL_PREF,
// Groq AI API Keys
GroqApiKey: !!process.env.GROQ_API_KEY,
GroqModelPref: process.env.GROQ_MODEL_PREF,
// Native LLM Keys
NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF,
NativeLLMTokenLimit: process.env.NATIVE_LLM_MODEL_TOKEN_LIMIT,
// HuggingFace Dedicated Inference
HuggingFaceLLMEndpoint: process.env.HUGGING_FACE_LLM_ENDPOINT,
HuggingFaceLLMAccessToken: !!process.env.HUGGING_FACE_LLM_API_KEY,
HuggingFaceLLMTokenLimit: process.env.HUGGING_FACE_LLM_TOKEN_LIMIT,
};
},
};
module.exports.SystemSettings = SystemSettings;
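
Worth noting about both extracted helpers: secrets are exported as presence booleans (!!process.env.X), never as raw values. That is exactly the shape the frontend requiredConfig checks consume, as a short sketch:

// Server side: only the fact that a key exists leaves the process.
const settings = { OpenAiKey: !!process.env.OPEN_AI_KEY };

// Client side (WorkspaceLLMItem): a provider needs setup while any
// required key is still falsy.
const requiredConfig = ["OpenAiKey"];
const requiresAdditionalSetup = requiredConfig.some((key) => !settings[key]);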

View File

@@ -19,6 +19,7 @@ const Workspace = {
"lastUpdatedAt",
"openAiPrompt",
"similarityThreshold",
"chatProvider",
"chatModel",
"topN",
"chatMode",
@@ -52,19 +53,42 @@
}
},
update: async function (id = null, data = {}) {
update: async function (id = null, updates = {}) {
if (!id) throw new Error("No workspace id provided for update");
const validKeys = Object.keys(data).filter((key) =>
const validFields = Object.keys(updates).filter((key) =>
this.writable.includes(key)
);
if (validKeys.length === 0)
Object.entries(updates).forEach(([key]) => {
if (validFields.includes(key)) return;
delete updates[key];
});
if (Object.keys(updates).length === 0)
return { workspace: { id }, message: "No valid fields to update!" };
// If the user unsets the chatProvider we also need to clear the
// chatModel to prevent confusion during LLM loading.
if (updates?.chatProvider === "default") {
updates.chatProvider = null;
updates.chatModel = null;
}
return this._update(id, updates);
},
// Explicit update of workspace settings that bypasses the writable-field
// validation above. Only use this method when directly setting values
// that take no user input.
_update: async function (id = null, data = {}) {
if (!id) throw new Error("No workspace id provided for update");
try {
const workspace = await prisma.workspaces.update({
where: { id },
data, // TODO: strict validation on writables here.
data,
});
return { workspace, message: null };
} catch (error) {
@@ -229,47 +253,40 @@
}
},
resetWorkspaceChatModels: async () => {
try {
await prisma.workspaces.updateMany({
data: {
chatModel: null,
},
});
return { success: true, error: null };
} catch (error) {
console.error("Error resetting workspace chat models:", error.message);
return { success: false, error: error.message };
}
},
trackChange: async function (prevData, newData, user) {
try {
const { Telemetry } = require("./telemetry");
const { EventLogs } = require("./eventLogs");
if (
!newData?.openAiPrompt ||
newData?.openAiPrompt === this.defaultPrompt ||
newData?.openAiPrompt === prevData?.openAiPrompt
)
return;
await Telemetry.sendTelemetry("workspace_prompt_changed");
await EventLogs.logEvent(
"workspace_prompt_changed",
{
workspaceName: prevData?.name,
prevSystemPrompt: prevData?.openAiPrompt || this.defaultPrompt,
newSystemPrompt: newData?.openAiPrompt,
},
user?.id
);
await this._trackWorkspacePromptChange(prevData, newData, user);
return;
} catch (error) {
console.error("Error tracking workspace change:", error.message);
return;
}
},
// We are only tracking this change to gauge the need for a prompt library or
// prompt assistant feature. If this is something you would like to see, tell us on GitHub!
_trackWorkspacePromptChange: async function (prevData, newData, user) {
const { Telemetry } = require("./telemetry");
const { EventLogs } = require("./eventLogs");
if (
!newData?.openAiPrompt ||
newData?.openAiPrompt === this.defaultPrompt ||
newData?.openAiPrompt === prevData?.openAiPrompt
)
return;
await Telemetry.sendTelemetry("workspace_prompt_changed");
await EventLogs.logEvent(
"workspace_prompt_changed",
{
workspaceName: prevData?.name,
prevSystemPrompt: prevData?.openAiPrompt || this.defaultPrompt,
newSystemPrompt: newData?.openAiPrompt,
},
user?.id
);
return;
},
};
module.exports = { Workspace };
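
The split gives update() a sanitizing public path and _update() a trusted internal one. A sketch of the resulting contract (field names other than chatProvider/chatModel are illustrative):

// Unknown keys are stripped, and the "default" sentinel nulls both columns.
await Workspace.update(workspace.id, {
  chatProvider: "default", // stored as null; chatModel is nulled alongside it
  notWritable: "ignored", // not in Workspace.writable, so silently dropped
});

// _update() writes exactly what it is given; reserved for server-set values
// such as the pfpFilename updates in the endpoints hunk above.
await Workspace._update(workspace.id, { pfpFilename: null });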

View File

@@ -0,0 +1,2 @@
-- AlterTable
ALTER TABLE "workspaces" ADD COLUMN "chatProvider" TEXT;

View File

@@ -98,6 +98,7 @@ model workspaces {
lastUpdatedAt DateTime @default(now())
openAiPrompt String?
similarityThreshold Float? @default(0.25)
chatProvider String?
chatModel String?
topN Int? @default(4)
chatMode String? @default("chat")

View File

@@ -28,7 +28,9 @@ async function streamChatWithForEmbed(
embed.workspace.openAiTemp = parseFloat(temperatureOverride);
const uuid = uuidv4();
const LLMConnector = getLLMProvider(chatModel ?? embed.workspace?.chatModel);
const LLMConnector = getLLMProvider({
model: chatModel ?? embed.workspace?.chatModel,
});
const VectorDb = getVectorDbClass();
const { safe, reasons = [] } = await LLMConnector.isSafe(message);
if (!safe) {

View File

@@ -37,7 +37,10 @@ async function chatWithWorkspace(
return await VALID_COMMANDS[command](workspace, message, uuid, user);
}
const LLMConnector = getLLMProvider(workspace?.chatModel);
const LLMConnector = getLLMProvider({
provider: workspace?.chatProvider,
model: workspace?.chatModel,
});
const VectorDb = getVectorDbClass();
const { safe, reasons = [] } = await LLMConnector.isSafe(message);
if (!safe) {

View File

@@ -35,7 +35,10 @@ async function streamChatWithWorkspace(
return;
}
const LLMConnector = getLLMProvider(workspace?.chatModel);
const LLMConnector = getLLMProvider({
provider: workspace?.chatProvider,
model: workspace?.chatModel,
});
const VectorDb = getVectorDbClass();
const { safe, reasons = [] } = await LLMConnector.isSafe(message);
if (!safe) {

View File

@@ -30,52 +30,53 @@ function getVectorDbClass() {
}
}
function getLLMProvider(modelPreference = null) {
const vectorSelection = process.env.LLM_PROVIDER || "openai";
function getLLMProvider({ provider = null, model = null } = {}) {
const LLMSelection = provider ?? process.env.LLM_PROVIDER ?? "openai";
const embedder = getEmbeddingEngineSelection();
switch (vectorSelection) {
switch (LLMSelection) {
case "openai":
const { OpenAiLLM } = require("../AiProviders/openAi");
return new OpenAiLLM(embedder, modelPreference);
return new OpenAiLLM(embedder, model);
case "azure":
const { AzureOpenAiLLM } = require("../AiProviders/azureOpenAi");
return new AzureOpenAiLLM(embedder, modelPreference);
return new AzureOpenAiLLM(embedder, model);
case "anthropic":
const { AnthropicLLM } = require("../AiProviders/anthropic");
return new AnthropicLLM(embedder, modelPreference);
return new AnthropicLLM(embedder, model);
case "gemini":
const { GeminiLLM } = require("../AiProviders/gemini");
return new GeminiLLM(embedder, modelPreference);
return new GeminiLLM(embedder, model);
case "lmstudio":
const { LMStudioLLM } = require("../AiProviders/lmStudio");
return new LMStudioLLM(embedder, modelPreference);
return new LMStudioLLM(embedder, model);
case "localai":
const { LocalAiLLM } = require("../AiProviders/localAi");
return new LocalAiLLM(embedder, modelPreference);
return new LocalAiLLM(embedder, model);
case "ollama":
const { OllamaAILLM } = require("../AiProviders/ollama");
return new OllamaAILLM(embedder, modelPreference);
return new OllamaAILLM(embedder, model);
case "togetherai":
const { TogetherAiLLM } = require("../AiProviders/togetherAi");
return new TogetherAiLLM(embedder, modelPreference);
return new TogetherAiLLM(embedder, model);
case "perplexity":
const { PerplexityLLM } = require("../AiProviders/perplexity");
return new PerplexityLLM(embedder, modelPreference);
return new PerplexityLLM(embedder, model);
case "openrouter":
const { OpenRouterLLM } = require("../AiProviders/openRouter");
return new OpenRouterLLM(embedder, modelPreference);
return new OpenRouterLLM(embedder, model);
case "mistral":
const { MistralLLM } = require("../AiProviders/mistral");
return new MistralLLM(embedder, modelPreference);
return new MistralLLM(embedder, model);
case "native":
const { NativeLLM } = require("../AiProviders/native");
return new NativeLLM(embedder, modelPreference);
return new NativeLLM(embedder, model);
case "huggingface":
const { HuggingFaceLLM } = require("../AiProviders/huggingface");
return new HuggingFaceLLM(embedder, modelPreference);
return new HuggingFaceLLM(embedder, model);
case "groq":
const { GroqLLM } = require("../AiProviders/groq");
return new GroqLLM(embedder, modelPreference);
return new GroqLLM(embedder, model);
default:
throw new Error("ENV: No LLM_PROVIDER value found in environment!");
}
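
The options-object refactor keeps all three call shapes in this commit flowing through one helper. A sketch of the shapes (the literal model string is illustrative):

getLLMProvider(); // system provider and model, driven by env
getLLMProvider({ model: "gpt-4" }); // embeds: system provider, explicit model
getLLMProvider({
  provider: workspace?.chatProvider, // full per-workspace override
  model: workspace?.chatModel,
});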

View File

@@ -2,7 +2,6 @@ const KEY_MAPPING = {
LLMProvider: {
envKey: "LLM_PROVIDER",
checks: [isNotEmpty, supportedLLM],
postUpdate: [wipeWorkspaceModelPreference],
},
// OpenAI Settings
OpenAiKey: {
@@ -493,15 +492,6 @@ function validHuggingFaceEndpoint(input = "") {
: null;
}
// If the LLMProvider has changed we need to reset all workspace model preferences to
// null since the provider<>model name combination will be invalid for whatever the new
// provider is.
async function wipeWorkspaceModelPreference(key, prev, next) {
if (prev === next) return;
const { Workspace } = require("../../models/workspace");
await Workspace.resetWorkspaceChatModels();
}
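
Dropping the hook outright, rather than keeping a no-op, is deliberate: with chatProvider stored per workspace, a system-wide provider change no longer invalidates each workspace's model choice. For context, a KEY_MAPPING entry now looks like this sketch (isNotEmpty and supportedLLM are validators defined elsewhere in this file):

const KEY_MAPPING = {
  LLMProvider: {
    envKey: "LLM_PROVIDER",
    checks: [isNotEmpty, supportedLLM],
    // postUpdate removed by this commit: workspace model preferences now
    // survive a change of the system provider.
  },
};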
// This will force-update .env variables which, for whatever reason, could not be parsed or
// read from an ENV file (this seems to be a complicating step for many), so allowing people to
// write to the process will at least alleviate that issue. It does not perform comprehensive validity or sanity checks