[FIX] Fix manual input model pref for agents (#1848)

* Patch LLM providers that take manual input for the model preference

* Refactor the agent model fallback;
  update the UI to show disabled providers, to stop questions about provider limitations

* Patch the log on startup

---------

Co-authored-by: timothycarambat <rambat1010@gmail.com>
Sean Hatfield 2024-07-11 14:03:24 -07:00 committed by GitHub
parent 79656718b2
commit 8f0af88332
4 changed files with 73 additions and 6 deletions
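
In short: providers that take a manually-typed model name (Azure, LMStudio, text-gen-webui, generic OpenAI) have no model list for the workspace to pick from, so the agent now falls back to the system-level environment variable and, failing that, a hardcoded provider default. A minimal standalone sketch of that resolution order — the function name `resolveAgentModel` and its parameters are illustrative; the map and env-var names come verbatim from the server diff below:

```js
// Providers whose model must be typed in manually, mapped to the system env
// var that stores it; `null` means the provider ignores `model` entirely.
const NO_PROVIDER_MODEL_DEFAULT = {
  azure: "OPEN_MODEL_PREF",
  lmstudio: "LMSTUDIO_MODEL_PREF",
  textgenwebui: null,
  "generic-openai": "GENERIC_OPEN_AI_MODEL_PREF",
};

function resolveAgentModel(provider, workspaceAgentModel, providerDefault) {
  // Multi-model providers: the workspace's agent model selection wins.
  if (!Object.keys(NO_PROVIDER_MODEL_DEFAULT).includes(provider))
    return workspaceAgentModel || providerDefault;
  // Manual-input providers: read the system-level preference if one exists.
  const sysModelKey = NO_PROVIDER_MODEL_DEFAULT[provider];
  if (sysModelKey) return process.env[sysModelKey] ?? providerDefault;
  // No usable system key either: fall through to the provider default.
  return providerDefault;
}
```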

File 1 of 4: AgentModelSelection component

@@ -1,7 +1,9 @@
 import useGetProviderModels, {
   DISABLED_PROVIDERS,
 } from "@/hooks/useGetProvidersModels";
+import paths from "@/utils/paths";
 import { useTranslation } from "react-i18next";
+import { Link, useParams } from "react-router-dom";
 
 // These models do NOT support function calling
 function supportedModel(provider, model = "") {
@@ -18,11 +20,32 @@ export default function AgentModelSelection({
   workspace,
   setHasChanges,
 }) {
+  const { slug } = useParams();
   const { defaultModels, customModels, loading } =
     useGetProviderModels(provider);
   const { t } = useTranslation();
-  if (DISABLED_PROVIDERS.includes(provider)) return null;
+  if (DISABLED_PROVIDERS.includes(provider)) {
+    return (
+      <div className="w-full h-10 justify-center items-center flex">
+        <p className="text-sm font-base text-white text-opacity-60 text-center">
+          Multi-model support is not available for this provider yet.
+          <br />
+          Agents will use{" "}
+          <Link
+            to={paths.workspace.settings.chatSettings(slug)}
+            className="underline"
+          >
+            the model set for the workspace
+          </Link>{" "}
+          or{" "}
+          <Link to={paths.settings.llmPreference()} className="underline">
+            the model set for the system.
+          </Link>
+        </p>
+      </div>
+    );
+  }
 
   if (loading) {
     return (

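The first file's change in miniature: the old early `return null` hid the agent model control entirely, which prompted the support questions mentioned in the commit message; the new branch renders a notice pointing at the workspace and system settings instead. A hedged sketch — the `DISABLED_PROVIDERS` members are assumed, and the helpers are hypothetical stand-ins for the JSX above:

```js
const DISABLED_PROVIDERS = ["azure", "lmstudio"]; // assumed members
const noticeWithLinks = () => "notice: agent uses the workspace/system model";
const modelDropdown = () => "render the agent model dropdown";

function agentModelSection(provider) {
  // Previously: `return null` — the control vanished with no explanation.
  if (DISABLED_PROVIDERS.includes(provider)) return noticeWithLinks();
  return modelDropdown();
}
```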
File 2 of 4: ChatModelSelection component

@@ -2,6 +2,7 @@ import useGetProviderModels, {
   DISABLED_PROVIDERS,
 } from "@/hooks/useGetProvidersModels";
+import { useTranslation } from "react-i18next";
 
 export default function ChatModelSelection({
   provider,
   workspace,

File 3 of 4: WorkspaceLLMSelection component

@@ -3,8 +3,10 @@ import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
 import WorkspaceLLMItem from "./WorkspaceLLMItem";
 import { AVAILABLE_LLM_PROVIDERS } from "@/pages/GeneralSettings/LLMPreference";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
-import ChatModelSelection from "../ChatModelSelection";
+import ChatModelSelection from "./ChatModelSelection";
 import { useTranslation } from "react-i18next";
+import { Link } from "react-router-dom";
+import paths from "@/utils/paths";
 
 // Some providers can only be associated with a single model.
 // In that case there is no selection to be made so we can just move on.
@@ -148,7 +150,22 @@ export default function WorkspaceLLMSelection({
           </button>
         )}
       </div>
-      {!NO_MODEL_SELECTION.includes(selectedLLM) && (
+      {NO_MODEL_SELECTION.includes(selectedLLM) ? (
+        <>
+          {selectedLLM !== "default" && (
+            <div className="w-full h-10 justify-center items-center flex mt-4">
+              <p className="text-sm font-base text-white text-opacity-60 text-center">
+                Multi-model support is not available for this provider yet.
+                <br />
+                This workspace will use{" "}
+                <Link to={paths.settings.llmPreference()} className="underline">
+                  the model set for the system.
+                </Link>
+              </p>
+            </div>
+          )}
+        </>
+      ) : (
         <div className="mt-4 flex flex-col gap-y-1">
           <ChatModelSelection
             provider={selectedLLM}

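The same pattern at the workspace level: the dropdown condition is inverted so single-model providers get an explanation rather than silence. Condensed into two predicates — a sketch; only the presence of "default" in `NO_MODEL_SELECTION` is confirmed by the diff, the other member is assumed:

```js
const NO_MODEL_SELECTION = ["default", "generic-openai"]; // partially assumed

// The model dropdown renders only for providers with a selectable model list.
const showsModelDropdown = (llm) => !NO_MODEL_SELECTION.includes(llm);

// The fallback notice renders for single-model providers, except "default",
// which already means "use the system setting" and needs no explanation.
const showsFallbackNotice = (llm) =>
  NO_MODEL_SELECTION.includes(llm) && llm !== "default";
```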
File 4 of 4: AgentHandler (server)

@@ -10,6 +10,12 @@ const { USER_AGENT, WORKSPACE_AGENT } = require("./defaults");
 class AgentHandler {
   #invocationUUID;
   #funcsToLoad = [];
+  #noProviderModelDefault = {
+    azure: "OPEN_MODEL_PREF",
+    lmstudio: "LMSTUDIO_MODEL_PREF",
+    textgenwebui: null, // does not even use `model` in API req
+    "generic-openai": "GENERIC_OPEN_AI_MODEL_PREF",
+  };
   invocation = null;
   aibitat = null;
   channel = null;
@@ -172,7 +178,7 @@
       case "mistral":
         return "mistral-medium";
       case "generic-openai":
-        return "gpt-3.5-turbo";
+        return null;
       case "perplexity":
         return "sonar-small-online";
       case "textgenwebui":
@@ -182,10 +188,30 @@
     }
   }
 
+  /**
+   * Finds or assumes the model preference value to use for API calls.
+   * If multi-model loading is supported, we use the workspace's agent model selection.
+   * If not supported, we attempt to fall back to the system provider value for the
+   * LLM preference, and if that fails, we assume a reasonable base model exists.
+   * @returns {string|null} the model preference value to use in API calls
+   */
+  #fetchModel() {
+    if (!Object.keys(this.#noProviderModelDefault).includes(this.provider))
+      return this.invocation.workspace.agentModel || this.#providerDefault();
+
+    // Provider has no reliable default (cannot load many models), so we need
+    // to look at the system for the model param.
+    const sysModelKey = this.#noProviderModelDefault[this.provider];
+    if (!!sysModelKey)
+      return process.env[sysModelKey] ?? this.#providerDefault();
+
+    // If all else fails, look at the provider default list.
+    return this.#providerDefault();
+  }
+
   #providerSetupAndCheck() {
     this.provider = this.invocation.workspace.agentProvider || "openai";
-    this.model =
-      this.invocation.workspace.agentModel || this.#providerDefault();
+    this.model = this.#fetchModel();
     this.log(`Start ${this.#invocationUUID}::${this.provider}:${this.model}`);
     this.#checkSetup();
   }
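
A worked example against the `resolveAgentModel` sketch near the top of this page, showing the new behavior for `generic-openai` — the model value is hypothetical, and note that the workspace selection is deliberately ignored for manual-input providers:

```js
process.env.GENERIC_OPEN_AI_MODEL_PREF = "my-local-model"; // hypothetical
resolveAgentModel("generic-openai", "ws-model", null); // -> "my-local-model"

delete process.env.GENERIC_OPEN_AI_MODEL_PREF;
// Previously this assumed "gpt-3.5-turbo"; the provider default is now null.
resolveAgentModel("generic-openai", "ws-model", null); // -> null
```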