Mirror of https://github.com/Mintplex-Labs/anything-llm.git (synced 2024-09-21 03:40:52 +02:00)

[FEAT] LiteLLM provider support (#1424)

* litellm LLM provider support
* fix lint error
* change import orders; fix issue with model retrieval

Co-authored-by: Timothy Carambat <rambat1010@gmail.com>

Parent: 7c6c272051
Commit: 826ef00da3
@@ -88,6 +88,7 @@ Some cool features of AnythingLLM
 - [Groq](https://groq.com/)
 - [Cohere](https://cohere.com/)
 - [KoboldCPP](https://github.com/LostRuins/koboldcpp)
+- [LiteLLM](https://github.com/BerriAI/litellm)
 - [Text Generation Web UI](https://github.com/oobabooga/text-generation-webui)

 **Embedder models:**
@@ -82,6 +82,12 @@ GID='1000'
 # GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096
 # GENERIC_OPEN_AI_API_KEY=sk-123abc

+# LLM_PROVIDER='litellm'
+# LITE_LLM_MODEL_PREF='gpt-3.5-turbo'
+# LITE_LLM_MODEL_TOKEN_LIMIT=4096
+# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
+# LITE_LLM_API_KEY='sk-123abc'
+
 # LLM_PROVIDER='cohere'
 # COHERE_API_KEY=
 # COHERE_MODEL_PREF='command-r'
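A quick way to sanity-check these values before restarting AnythingLLM is to hit the proxy with the same OpenAI client the server code uses. This is a minimal sketch, assuming the `openai` npm package is installed and a LiteLLM proxy is already listening at the base path above:

// Sketch only: base URL and key mirror the example env values; the key can be
// omitted when the proxy does not enforce authentication.
const { OpenAI } = require("openai");

const client = new OpenAI({
  baseURL: "http://127.0.0.1:4000",
  apiKey: "sk-123abc",
});

client.models
  .list()
  .then((res) => console.log(res.data.map((m) => m.id))) // the model ids the UI will offer
  .catch((e) => console.error("Proxy unreachable:", e.message));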
frontend/src/components/LLMSelection/LiteLLMOptions/index.jsx (new file, 148 lines)
@@ -0,0 +1,148 @@
import { useEffect, useState } from "react";
import System from "@/models/system";

export default function LiteLLMOptions({ settings }) {
  const [basePathValue, setBasePathValue] = useState(settings?.LiteLLMBasePath);
  const [basePath, setBasePath] = useState(settings?.LiteLLMBasePath);
  const [apiKeyValue, setApiKeyValue] = useState(settings?.LiteLLMAPIKey);
  const [apiKey, setApiKey] = useState(settings?.LiteLLMAPIKey);

  return (
    <div className="w-full flex flex-col gap-y-4">
      <div className="w-full flex items-center gap-4">
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-4">
            Base URL
          </label>
          <input
            type="url"
            name="LiteLLMBasePath"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
            placeholder="http://127.0.0.1:4000"
            defaultValue={settings?.LiteLLMBasePath}
            required={true}
            autoComplete="off"
            spellCheck={false}
            onChange={(e) => setBasePathValue(e.target.value)}
            onBlur={() => setBasePath(basePathValue)}
          />
        </div>
        <LiteLLMModelSelection
          settings={settings}
          basePath={basePath}
          apiKey={apiKey}
        />
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-4">
            Token context window
          </label>
          <input
            type="number"
            name="LiteLLMTokenLimit"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
            placeholder="4096"
            min={1}
            onScroll={(e) => e.target.blur()}
            defaultValue={settings?.LiteLLMTokenLimit}
            required={true}
            autoComplete="off"
          />
        </div>
      </div>
      <div className="w-full flex items-center gap-4">
        <div className="flex flex-col w-60">
          <div className="flex flex-col gap-y-1 mb-4">
            <label className="text-white text-sm font-semibold flex items-center gap-x-2">
              API Key <p className="!text-xs !italic !font-thin">optional</p>
            </label>
          </div>
          <input
            type="password"
            name="LiteLLMAPIKey"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
            placeholder="sk-mysecretkey"
            defaultValue={settings?.LiteLLMAPIKey ? "*".repeat(20) : ""}
            autoComplete="off"
            spellCheck={false}
            onChange={(e) => setApiKeyValue(e.target.value)}
            onBlur={() => setApiKey(apiKeyValue)}
          />
        </div>
      </div>
    </div>
  );
}

function LiteLLMModelSelection({ settings, basePath = null, apiKey = null }) {
  const [customModels, setCustomModels] = useState([]);
  const [loading, setLoading] = useState(true);

  useEffect(() => {
    async function findCustomModels() {
      if (!basePath) {
        setCustomModels([]);
        setLoading(false);
        return;
      }
      setLoading(true);
      const { models } = await System.customModels(
        "litellm",
        typeof apiKey === "boolean" ? null : apiKey,
        basePath
      );
      setCustomModels(models || []);
      setLoading(false);
    }
    findCustomModels();
  }, [basePath, apiKey]);

  if (loading || customModels.length == 0) {
    return (
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-4">
          Chat Model Selection
        </label>
        <select
          name="LiteLLMModelPref"
          disabled={true}
          className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
        >
          <option disabled={true} selected={true}>
            {basePath?.includes("/v1")
              ? "-- loading available models --"
              : "-- waiting for URL --"}
          </option>
        </select>
      </div>
    );
  }

  return (
    <div className="flex flex-col w-60">
      <label className="text-white text-sm font-semibold block mb-4">
        Chat Model Selection
      </label>
      <select
        name="LiteLLMModelPref"
        required={true}
        className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
      >
        {customModels.length > 0 && (
          <optgroup label="Your loaded models">
            {customModels.map((model) => {
              return (
                <option
                  key={model.id}
                  value={model.id}
                  selected={settings.LiteLLMModelPref === model.id}
                >
                  {model.id}
                </option>
              );
            })}
          </optgroup>
        )}
      </select>
    </div>
  );
}
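Worth noting in the component above: each field keeps two pieces of state (`basePathValue`/`basePath`, `apiKeyValue`/`apiKey`) and only commits on blur, so the model list is refetched once per edit rather than on every keystroke. A stripped-down sketch of the same pattern (names here are illustrative, not from the codebase):

import { useState } from "react";

// Illustrative only: the draft state tracks keystrokes, while onCommit fires
// once the user leaves the field, which is what gates the model refetch above.
function CommittedInput({ initial = "", onCommit }) {
  const [draft, setDraft] = useState(initial);
  return (
    <input
      value={draft}
      onChange={(e) => setDraft(e.target.value)}
      onBlur={() => onCommit(draft)}
    />
  );
}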
frontend/src/media/llmprovider/litellm.png (new binary file, 49 KiB; binary file not shown)
@@ -21,6 +21,7 @@ import GroqLogo from "@/media/llmprovider/groq.png";
 import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
 import CohereLogo from "@/media/llmprovider/cohere.png";
+import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@@ -38,12 +39,13 @@ import PerplexityOptions from "@/components/LLMSelection/PerplexityOptions";
 import OpenRouterOptions from "@/components/LLMSelection/OpenRouterOptions";
 import GroqAiOptions from "@/components/LLMSelection/GroqAiOptions";
 import CohereAiOptions from "@/components/LLMSelection/CohereAiOptions";
+import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
+import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
+import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
 import CTAButton from "@/components/lib/CTAButton";
-import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
-import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";

 export const AVAILABLE_LLM_PROVIDERS = [
   {
@@ -186,6 +188,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
     description: "Run Cohere's powerful Command models.",
     requiredConfig: ["CohereApiKey"],
   },
+  {
+    name: "LiteLLM",
+    value: "litellm",
+    logo: LiteLLMLogo,
+    options: (settings) => <LiteLLMOptions settings={settings} />,
+    description: "Run LiteLLM's OpenAI compatible proxy for various LLMs.",
+    requiredConfig: ["LiteLLMBasePath"],
+  },
   {
     name: "Generic OpenAI",
     value: "generic-openai",
@@ -17,6 +17,8 @@ import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
 import GroqLogo from "@/media/llmprovider/groq.png";
 import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
+import LiteLLMLogo from "@/media/llmprovider/litellm.png";
+
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import ZillizLogo from "@/media/vectordbs/zilliz.png";
 import AstraDBLogo from "@/media/vectordbs/astraDB.png";
@@ -168,6 +170,13 @@ export const LLM_SELECTION_PRIVACY = {
     ],
     logo: CohereLogo,
   },
+  litellm: {
+    name: "LiteLLM",
+    description: [
+      "Your model and chats are only accessible on the server running LiteLLM",
+    ],
+    logo: LiteLLMLogo,
+  },
 };

 export const VECTOR_DB_PRIVACY = {
@@ -17,6 +17,8 @@ import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
 import GroqLogo from "@/media/llmprovider/groq.png";
 import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
+import LiteLLMLogo from "@/media/llmprovider/litellm.png";
+
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@@ -34,14 +36,15 @@ import PerplexityOptions from "@/components/LLMSelection/PerplexityOptions";
 import OpenRouterOptions from "@/components/LLMSelection/OpenRouterOptions";
 import GroqAiOptions from "@/components/LLMSelection/GroqAiOptions";
 import CohereAiOptions from "@/components/LLMSelection/CohereAiOptions";
+import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
+import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
+import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import System from "@/models/system";
 import paths from "@/utils/paths";
 import showToast from "@/utils/toast";
 import { useNavigate } from "react-router-dom";
-import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
-import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";

 const TITLE = "LLM Preference";
 const DESCRIPTION =
@@ -164,6 +167,13 @@ const LLMS = [
     options: (settings) => <CohereAiOptions settings={settings} />,
     description: "Run Cohere's powerful Command models.",
   },
+  {
+    name: "LiteLLM",
+    value: "litellm",
+    logo: LiteLLMLogo,
+    options: (settings) => <LiteLLMOptions settings={settings} />,
+    description: "Run LiteLLM's OpenAI compatible proxy for various LLMs.",
+  },
   {
     name: "Generic OpenAI",
     value: "generic-openai",
@@ -79,6 +79,12 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
 # GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096
 # GENERIC_OPEN_AI_API_KEY=sk-123abc

+# LLM_PROVIDER='litellm'
+# LITE_LLM_MODEL_PREF='gpt-3.5-turbo'
+# LITE_LLM_MODEL_TOKEN_LIMIT=4096
+# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
+# LITE_LLM_API_KEY='sk-123abc'
+
 # LLM_PROVIDER='cohere'
 # COHERE_API_KEY=
 # COHERE_MODEL_PREF='command-r'
@@ -408,6 +408,12 @@ const SystemSettings = {
     TextGenWebUITokenLimit: process.env.TEXT_GEN_WEB_UI_MODEL_TOKEN_LIMIT,
     TextGenWebUIAPIKey: !!process.env.TEXT_GEN_WEB_UI_API_KEY,

+    // LiteLLM Keys
+    LiteLLMModelPref: process.env.LITE_LLM_MODEL_PREF,
+    LiteLLMTokenLimit: process.env.LITE_LLM_MODEL_TOKEN_LIMIT,
+    LiteLLMBasePath: process.env.LITE_LLM_BASE_PATH,
+    LiteLLMApiKey: !!process.env.LITE_LLM_API_KEY,
+
     // Generic OpenAI Keys
     GenericOpenAiBasePath: process.env.GENERIC_OPEN_AI_BASE_PATH,
     GenericOpenAiModelPref: process.env.GENERIC_OPEN_AI_MODEL_PREF,
server/utils/AiProviders/liteLLM/index.js (new file, 178 lines)
@@ -0,0 +1,178 @@
const { v4: uuidv4 } = require("uuid"); // needed by handleStream's default uuid
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  writeResponseChunk,
  clientAbortedHandler,
} = require("../../helpers/chat/responses");

class LiteLLM {
  constructor(embedder = null, modelPreference = null) {
    const { OpenAI: OpenAIApi } = require("openai");
    if (!process.env.LITE_LLM_BASE_PATH)
      throw new Error(
        "LiteLLM must have a valid base path to use for the api."
      );

    this.basePath = process.env.LITE_LLM_BASE_PATH;
    this.openai = new OpenAIApi({
      baseURL: this.basePath,
      apiKey: process.env.LITE_LLM_API_KEY ?? null,
    });
    this.model = modelPreference ?? process.env.LITE_LLM_MODEL_PREF ?? null;
    this.maxTokens = process.env.LITE_LLM_MODEL_TOKEN_LIMIT ?? 1024;
    if (!this.model) throw new Error("LiteLLM must have a valid model set.");
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    if (!embedder)
      console.warn(
        "No embedding provider defined for LiteLLM - falling back to NativeEmbedder for embedding!"
      );
    this.embedder = !embedder ? new NativeEmbedder() : embedder;
    this.defaultTemp = 0.7;
    this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
  }

  log(text, ...args) {
    console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
  }

  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  // Ensure the user set a value for the token limit
  // and if undefined - assume 4096 window.
  promptWindowLimit() {
    const limit = process.env.LITE_LLM_MODEL_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No token context limit was set.");
    return Number(limit);
  }

  // Short circuit since we have no idea if the model is valid or not
  // in pre-flight for generic endpoints
  isValidChatCompletionModel(_modelName = "") {
    return true;
  }

  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
  }

  async isSafe(_input = "") {
    // Not implemented so must be stubbed
    return { safe: true, reasons: [] };
  }

  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    const result = await this.openai.chat.completions
      .create({
        model: this.model,
        messages,
        temperature,
        max_tokens: parseInt(this.maxTokens), // LiteLLM requires int
      })
      .catch((e) => {
        throw new Error(e.response.data.error.message);
      });

    if (!result.hasOwnProperty("choices") || result.choices.length === 0)
      return null;
    return result.choices[0].message.content;
  }

  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    const streamRequest = await this.openai.chat.completions.create({
      model: this.model,
      stream: true,
      messages,
      temperature,
      max_tokens: parseInt(this.maxTokens), // LiteLLM requires int
    });
    return streamRequest;
  }

  handleStream(response, stream, responseProps) {
    const { uuid = uuidv4(), sources = [] } = responseProps;

    return new Promise(async (resolve) => {
      let fullText = "";

      const handleAbort = () => clientAbortedHandler(resolve, fullText);
      response.on("close", handleAbort);

      for await (const chunk of stream) {
        const message = chunk?.choices?.[0];
        const token = message?.delta?.content;

        if (token) {
          fullText += token;
          writeResponseChunk(response, {
            uuid,
            sources: [],
            type: "textResponseChunk",
            textResponse: token,
            close: false,
            error: false,
          });
        }

        // LiteLLM does not give a finish reason in stream until the final chunk
        if (message.finish_reason || message.finish_reason === "stop") {
          writeResponseChunk(response, {
            uuid,
            sources,
            type: "textResponseChunk",
            textResponse: "",
            close: true,
            error: false,
          });
          response.removeListener("close", handleAbort);
          resolve(fullText);
        }
      }
    });
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}

module.exports = {
  LiteLLM,
};
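For orientation, here is a minimal usage sketch of the class above, assuming LITE_LLM_BASE_PATH and LITE_LLM_MODEL_PREF are set in the environment; the require path is an assumption, and the call flow simply mirrors the methods defined in the file rather than any documented public API:

// Sketch only: constructing with no embedder falls back to NativeEmbedder.
const { LiteLLM } = require("./server/utils/AiProviders/liteLLM");

async function main() {
  const llm = new LiteLLM();
  const messages = llm.constructPrompt({
    systemPrompt: "You are a concise assistant.",
    contextTexts: [],
    chatHistory: [],
    userPrompt: "Say hello.",
  });
  const reply = await llm.getChatCompletion(messages, { temperature: 0.7 });
  console.log(reply);
}

main();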
@@ -16,6 +16,7 @@ const SUPPORT_CUSTOM_MODELS = [
   "openrouter",
   "lmstudio",
   "koboldcpp",
+  "litellm",
   "elevenlabs-tts",
 ];
@@ -44,6 +45,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
       return await getLMStudioModels(basePath);
     case "koboldcpp":
       return await getKoboldCPPModels(basePath);
+    case "litellm":
+      return await liteLLMModels(basePath, apiKey);
     case "elevenlabs-tts":
       return await getElevenLabsModels(apiKey);
     default:
@@ -164,6 +167,25 @@ async function localAIModels(basePath = null, apiKey = null) {
   return { models, error: null };
 }

+async function liteLLMModels(basePath = null, apiKey = null) {
+  const { OpenAI: OpenAIApi } = require("openai");
+  const openai = new OpenAIApi({
+    baseURL: basePath || process.env.LITE_LLM_BASE_PATH,
+    apiKey: apiKey || process.env.LITE_LLM_API_KEY || null,
+  });
+  const models = await openai.models
+    .list()
+    .then((results) => results.data)
+    .catch((e) => {
+      console.error(`LiteLLM:listModels`, e.message);
+      return [];
+    });
+
+  // Api Key was successful so lets save it for future uses
+  if (models.length > 0 && !!apiKey) process.env.LITE_LLM_API_KEY = apiKey;
+  return { models, error: null };
+}
+
 async function getLMStudioModels(basePath = null) {
   try {
     const { OpenAI: OpenAIApi } = require("openai");
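As a quick illustration of the new branch, the helper can be exercised directly. This sketch assumes `getCustomModels` is exported from this module and that a proxy is reachable at the given base path; the require path is an assumption:

const { getCustomModels } = require("./server/utils/helpers/customModels");

getCustomModels("litellm", "sk-123abc", "http://127.0.0.1:4000").then(
  ({ models, error }) => {
    if (error) return console.error(error);
    // Each entry comes straight from the proxy's model listing.
    models.forEach((model) => console.log(model.id));
  }
);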
@@ -86,6 +86,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
     case "cohere":
       const { CohereLLM } = require("../AiProviders/cohere");
       return new CohereLLM(embedder, model);
+    case "litellm":
+      const { LiteLLM } = require("../AiProviders/liteLLM");
+      return new LiteLLM(embedder, model);
     case "generic-openai":
       const { GenericOpenAiLLM } = require("../AiProviders/genericOpenAi");
       return new GenericOpenAiLLM(embedder, model);
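The factory keeps provider selection string-keyed, so the new case slots in alongside the others. A hedged sketch of how it gets picked up at runtime (the require path is an assumption):

// Sketch only: the constructor requires LITE_LLM_BASE_PATH, and passing a
// model here overrides LITE_LLM_MODEL_PREF.
process.env.LLM_PROVIDER = "litellm";
process.env.LITE_LLM_BASE_PATH = "http://127.0.0.1:4000";
const { getLLMProvider } = require("./server/utils/helpers");
const llm = getLLMProvider({ provider: "litellm", model: "gpt-3.5-turbo" });
console.log(llm.constructor.name); // "LiteLLM"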
@@ -160,6 +160,24 @@ const KEY_MAPPING = {
     checks: [],
   },

+  // LiteLLM Settings
+  LiteLLMModelPref: {
+    envKey: "LITE_LLM_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
+  LiteLLMTokenLimit: {
+    envKey: "LITE_LLM_MODEL_TOKEN_LIMIT",
+    checks: [nonZero],
+  },
+  LiteLLMBasePath: {
+    envKey: "LITE_LLM_BASE_PATH",
+    checks: [isValidURL],
+  },
+  LiteLLMApiKey: {
+    envKey: "LITE_LLM_API_KEY",
+    checks: [],
+  },
+
   // Generic OpenAI InferenceSettings
   GenericOpenAiBasePath: {
     envKey: "GENERIC_OPEN_AI_BASE_PATH",
@@ -469,6 +487,7 @@ function supportedLLM(input = "") {
     "koboldcpp",
     "textgenwebui",
     "cohere",
+    "litellm",
     "generic-openai",
   ].includes(input);
   return validSelection ? null : `${input} is not a valid LLM provider.`;
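End to end, the string "litellm" now passes this validation. A trivial illustration of the contract shown above (`supportedLLM` appears to be module-private, so this is illustrative only):

// A valid provider yields null; an unknown one yields an error string.
console.log(supportedLLM("litellm")); // null
console.log(supportedLLM("notreal")); // "notreal is not a valid LLM provider."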