Add generic OpenAI endpoint support (#1178)

* Add generic OpenAI endpoint support

* allow any input for model in case provider does not support models endpoint
Timothy Carambat, 2024-04-23 13:06:07 -07:00, committed by GitHub
parent ac6ca13f60
commit df17fbda36
11 changed files with 441 additions and 108 deletions

View File

@@ -66,6 +66,12 @@ GID='1000'
# GROQ_API_KEY=gsk_abcxyz
# GROQ_MODEL_PREF=llama2-70b-4096
# LLM_PROVIDER='generic-openai'
# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
# GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
# GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096
# GENERIC_OPEN_AI_API_KEY=sk-123abc
###########################################
######## Embedding API SELECTION ##########
###########################################
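For orientation (not part of the diff): "OpenAI-compatible" here means the base path serves the standard OpenAI REST shape, so a smoke test like the sketch below should succeed against it. The server behind the base path (vLLM, LiteLLM, any proxy) is an assumption for illustration.

// Hypothetical smoke test for a generic OpenAI-compatible endpoint.
// Assumes the GENERIC_OPEN_AI_* variables above are set and the server
// exposes the standard /chat/completions route.
fetch(`${process.env.GENERIC_OPEN_AI_BASE_PATH}/chat/completions`, {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${process.env.GENERIC_OPEN_AI_API_KEY}`,
  },
  body: JSON.stringify({
    model: process.env.GENERIC_OPEN_AI_MODEL_PREF,
    messages: [{ role: "user", content: "ping" }],
  }),
})
  .then((res) => res.json())
  .then((json) => console.log(json.choices?.[0]?.message?.content));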

View File

@@ -0,0 +1,70 @@
export default function GenericOpenAiOptions({ settings }) {
return (
<div className="flex gap-4 flex-wrap">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Base URL
</label>
<input
type="url"
name="GenericOpenAiBasePath"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="eg: https://proxy.openai.com"
defaultValue={settings?.GenericOpenAiBasePath}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
API Key
</label>
<input
type="password"
name="GenericOpenAiKey"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Generic service API Key"
defaultValue={settings?.GenericOpenAiKey ? "*".repeat(20) : ""}
required={false}
autoComplete="off"
spellCheck={false}
/>
</div>
{!settings?.credentialsOnly && (
<>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Name
</label>
<input
type="text"
name="GenericOpenAiModelPref"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Model id used for chat requests"
defaultValue={settings?.GenericOpenAiModelPref}
required={true}
autoComplete="off"
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Token context window
</label>
<input
type="number"
name="GenericOpenAiTokenLimit"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Content window limit (eg: 4096)"
min={1}
onScroll={(e) => e.target.blur()}
defaultValue={settings?.GenericOpenAiTokenLimit}
required={true}
autoComplete="off"
/>
</div>
</>
)}
</div>
);
}
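For illustration (not part of the diff): the settings prop mirrors the inputs' name attributes above, and credentialsOnly collapses the panel to just the URL and key fields. A hypothetical mount, with assumed values:

// Hypothetical parent render; keys match the inputs' `name` attributes.
<GenericOpenAiOptions
  settings={{
    GenericOpenAiBasePath: "http://localhost:8000/v1", // assumed local server
    GenericOpenAiKey: true, // truthy => masked "*".repeat(20) placeholder
    GenericOpenAiModelPref: "gpt-3.5-turbo",
    GenericOpenAiTokenLimit: 4096,
    credentialsOnly: false, // true hides the model + token fields
  }}
/>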

Binary file not shown (new provider logo added: generic-openai.png, 29 KiB).

View File

@@ -5,6 +5,7 @@ import System from "@/models/system";
import showToast from "@/utils/toast";
import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
import OpenAiLogo from "@/media/llmprovider/openai.png";
import GenericOpenAiLogo from "@/media/llmprovider/generic-openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import AnthropicLogo from "@/media/llmprovider/anthropic.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
@@ -19,6 +20,7 @@ import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
import GroqLogo from "@/media/llmprovider/groq.png";
import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions";
@@ -150,6 +152,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
"The fastest LLM inferencing available for real-time AI applications.",
requiredConfig: ["GroqApiKey"],
},
{
name: "Generic OpenAI",
value: "generic-openai",
logo: GenericOpenAiLogo,
options: (settings) => <GenericOpenAiOptions settings={settings} />,
description:
"Connect to any OpenAi-compatible service via a custom configuration",
},
{
name: "Native",
value: "native",

View File

@@ -2,6 +2,7 @@ import PreLoader from "@/components/Preloader";
import System from "@/models/system";
import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
import OpenAiLogo from "@/media/llmprovider/openai.png";
import GenericOpenAiLogo from "@/media/llmprovider/generic-openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import AnthropicLogo from "@/media/llmprovider/anthropic.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
@@ -136,6 +137,13 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: GroqLogo,
},
"generic-openai": {
name: "Generic OpenAI compatible service",
description: [
"Data is shared according to the terms of service applicable with your generic endpoint provider.",
],
logo: GenericOpenAiLogo,
},
};
export const VECTOR_DB_PRIVACY = {

View File

@@ -1,6 +1,7 @@
import { MagnifyingGlass } from "@phosphor-icons/react";
import { useEffect, useState, useRef } from "react";
import OpenAiLogo from "@/media/llmprovider/openai.png";
import GenericOpenAiLogo from "@/media/llmprovider/generic-openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import AnthropicLogo from "@/media/llmprovider/anthropic.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
@@ -15,6 +16,7 @@ import PerplexityLogo from "@/media/llmprovider/perplexity.png";
import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
import GroqLogo from "@/media/llmprovider/groq.png";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions";
@@ -38,6 +40,120 @@ const TITLE = "LLM Preference";
const DESCRIPTION =
"AnythingLLM can work with many LLM providers. This will be the service which handles chatting.";
const LLMS = [
{
name: "OpenAI",
value: "openai",
logo: OpenAiLogo,
options: (settings) => <OpenAiOptions settings={settings} />,
description: "The standard option for most non-commercial use.",
},
{
name: "Azure OpenAI",
value: "azure",
logo: AzureOpenAiLogo,
options: (settings) => <AzureAiOptions settings={settings} />,
description: "The enterprise option of OpenAI hosted on Azure services.",
},
{
name: "Anthropic",
value: "anthropic",
logo: AnthropicLogo,
options: (settings) => <AnthropicAiOptions settings={settings} />,
description: "A friendly AI Assistant hosted by Anthropic.",
},
{
name: "Gemini",
value: "gemini",
logo: GeminiLogo,
options: (settings) => <GeminiLLMOptions settings={settings} />,
description: "Google's largest and most capable AI model",
},
{
name: "HuggingFace",
value: "huggingface",
logo: HuggingFaceLogo,
options: (settings) => <HuggingFaceOptions settings={settings} />,
description:
"Access 150,000+ open-source LLMs and the world's AI community",
},
{
name: "Ollama",
value: "ollama",
logo: OllamaLogo,
options: (settings) => <OllamaLLMOptions settings={settings} />,
description: "Run LLMs locally on your own machine.",
},
{
name: "LM Studio",
value: "lmstudio",
logo: LMStudioLogo,
options: (settings) => <LMStudioOptions settings={settings} />,
description:
"Discover, download, and run thousands of cutting edge LLMs in a few clicks.",
},
{
name: "Local AI",
value: "localai",
logo: LocalAiLogo,
options: (settings) => <LocalAiOptions settings={settings} />,
description: "Run LLMs locally on your own machine.",
},
{
name: "Together AI",
value: "togetherai",
logo: TogetherAILogo,
options: (settings) => <TogetherAiOptions settings={settings} />,
description: "Run open source models from Together AI.",
},
{
name: "Mistral",
value: "mistral",
logo: MistralLogo,
options: (settings) => <MistralOptions settings={settings} />,
description: "Run open source models from Mistral AI.",
},
{
name: "Perplexity AI",
value: "perplexity",
logo: PerplexityLogo,
options: (settings) => <PerplexityOptions settings={settings} />,
description:
"Run powerful and internet-connected models hosted by Perplexity AI.",
},
{
name: "OpenRouter",
value: "openrouter",
logo: OpenRouterLogo,
options: (settings) => <OpenRouterOptions settings={settings} />,
description: "A unified interface for LLMs.",
},
{
name: "Groq",
value: "groq",
logo: GroqLogo,
options: (settings) => <GroqAiOptions settings={settings} />,
description:
"The fastest LLM inferencing available for real-time AI applications.",
},
{
name: "Generic OpenAI",
value: "generic-openai",
logo: GenericOpenAiLogo,
options: (settings) => <GenericOpenAiOptions settings={settings} />,
description:
"Connect to any OpenAi-compatible service via a custom configuration",
},
{
name: "Native",
value: "native",
logo: AnythingLLMIcon,
options: (settings) => <NativeLLMOptions settings={settings} />,
description:
"Use a downloaded custom Llama model for chatting on this AnythingLLM instance.",
},
];
export default function LLMPreference({
setHeader,
setForwardBtn,
@@ -61,112 +177,6 @@ export default function LLMPreference({
fetchKeys();
}, []);
const LLMS = [
{
name: "OpenAI",
value: "openai",
logo: OpenAiLogo,
options: <OpenAiOptions settings={settings} />,
description: "The standard option for most non-commercial use.",
},
{
name: "Azure OpenAI",
value: "azure",
logo: AzureOpenAiLogo,
options: <AzureAiOptions settings={settings} />,
description: "The enterprise option of OpenAI hosted on Azure services.",
},
{
name: "Anthropic",
value: "anthropic",
logo: AnthropicLogo,
options: <AnthropicAiOptions settings={settings} />,
description: "A friendly AI Assistant hosted by Anthropic.",
},
{
name: "Gemini",
value: "gemini",
logo: GeminiLogo,
options: <GeminiLLMOptions settings={settings} />,
description: "Google's largest and most capable AI model",
},
{
name: "HuggingFace",
value: "huggingface",
logo: HuggingFaceLogo,
options: <HuggingFaceOptions settings={settings} />,
description:
"Access 150,000+ open-source LLMs and the world's AI community",
},
{
name: "Ollama",
value: "ollama",
logo: OllamaLogo,
options: <OllamaLLMOptions settings={settings} />,
description: "Run LLMs locally on your own machine.",
},
{
name: "LM Studio",
value: "lmstudio",
logo: LMStudioLogo,
options: <LMStudioOptions settings={settings} />,
description:
"Discover, download, and run thousands of cutting edge LLMs in a few clicks.",
},
{
name: "Local AI",
value: "localai",
logo: LocalAiLogo,
options: <LocalAiOptions settings={settings} />,
description: "Run LLMs locally on your own machine.",
},
{
name: "Together AI",
value: "togetherai",
logo: TogetherAILogo,
options: <TogetherAiOptions settings={settings} />,
description: "Run open source models from Together AI.",
},
{
name: "Mistral",
value: "mistral",
logo: MistralLogo,
options: <MistralOptions settings={settings} />,
description: "Run open source models from Mistral AI.",
},
{
name: "Perplexity AI",
value: "perplexity",
logo: PerplexityLogo,
options: <PerplexityOptions settings={settings} />,
description:
"Run powerful and internet-connected models hosted by Perplexity AI.",
},
{
name: "OpenRouter",
value: "openrouter",
logo: OpenRouterLogo,
options: <OpenRouterOptions settings={settings} />,
description: "A unified interface for LLMs.",
},
{
name: "Groq",
value: "groq",
logo: GroqLogo,
options: <GroqAiOptions settings={settings} />,
description:
"The fastest LLM inferencing available for real-time AI applications.",
},
{
name: "Native",
value: "native",
logo: AnythingLLMIcon,
options: <NativeLLMOptions settings={settings} />,
description:
"Use a downloaded custom Llama model for chatting on this AnythingLLM instance.",
},
];
function handleForward() {
if (hiddenSubmitButtonRef.current) {
hiddenSubmitButtonRef.current.click();
@@ -251,7 +261,7 @@ export default function LLMPreference({
</div>
<div className="mt-4 flex flex-col gap-y-1">
{selectedLLM &&
LLMS.find((llm) => llm.value === selectedLLM)?.options}
LLMS.find((llm) => llm.value === selectedLLM)?.options(settings)}
</div>
<button
type="submit"

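The lookup change above (`?.options` becoming `?.options(settings)`) pairs with the larger diff in this file: each provider's options entry is now a factory function of settings rather than a pre-built element, which is what allows the LLMS array to move from component scope to module scope. A minimal sketch of the pattern:

// Before: the element captured `settings` from the component closure.
//   { options: <OpenAiOptions settings={settings} /> }
// After: a factory, evaluated at render time with the current settings.
//   { options: (settings) => <OpenAiOptions settings={settings} /> }
const selected = LLMS.find((llm) => llm.value === selectedLLM);
const panel = selected?.options(settings); // JSX element or undefined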
View File

@@ -63,6 +63,12 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
# GROQ_API_KEY=gsk_abcxyz
# GROQ_MODEL_PREF=llama2-70b-4096
# LLM_PROVIDER='generic-openai'
# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
# GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
# GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096
# GENERIC_OPEN_AI_API_KEY=sk-123abc
###########################################
######## Embedding API SELECTION ##########
###########################################

View File

@@ -358,6 +358,12 @@ const SystemSettings = {
HuggingFaceLLMEndpoint: process.env.HUGGING_FACE_LLM_ENDPOINT,
HuggingFaceLLMAccessToken: !!process.env.HUGGING_FACE_LLM_API_KEY,
HuggingFaceLLMTokenLimit: process.env.HUGGING_FACE_LLM_TOKEN_LIMIT,
// Generic OpenAI Keys
GenericOpenAiBasePath: process.env.GENERIC_OPEN_AI_BASE_PATH,
GenericOpenAiModelPref: process.env.GENERIC_OPEN_AI_MODEL_PREF,
GenericOpenAiTokenLimit: process.env.GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT,
GenericOpenAiKey: !!process.env.GENERIC_OPEN_AI_API_KEY,
};
},
};
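Note the `!!` on GenericOpenAiKey above: the settings endpoint reports only whether a key is set, never the key itself, which is why the options panel earlier renders a masked placeholder. A sketch of the payload fragment the frontend receives (values assumed, not from the diff):

// Illustrative SystemSettings payload fragment for this provider.
const exampleGenericOpenAiSettings = {
  GenericOpenAiBasePath: "http://proxy.url.openai.com/v1",
  GenericOpenAiModelPref: "gpt-3.5-turbo",
  GenericOpenAiTokenLimit: "4096", // env values arrive as strings
  GenericOpenAiKey: true, // boolean only: the raw key never leaves the server
};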

View File

@@ -0,0 +1,193 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { chatPrompt } = require("../../chats");
const { handleDefaultStreamResponse } = require("../../helpers/chat/responses");
class GenericOpenAiLLM {
constructor(embedder = null, modelPreference = null) {
const { Configuration, OpenAIApi } = require("openai");
if (!process.env.GENERIC_OPEN_AI_BASE_PATH)
throw new Error(
"GenericOpenAI must have a valid base path to use for the api."
);
this.basePath = process.env.GENERIC_OPEN_AI_BASE_PATH;
const config = new Configuration({
basePath: this.basePath,
apiKey: process.env.GENERIC_OPEN_AI_API_KEY ?? null,
});
this.openai = new OpenAIApi(config);
this.model =
modelPreference ?? process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
if (!this.model)
throw new Error("GenericOpenAI must have a valid model set.");
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
if (!embedder)
console.warn(
"No embedding provider defined for GenericOpenAiLLM - falling back to NativeEmbedder for embedding!"
);
this.embedder = !embedder ? new NativeEmbedder() : embedder;
this.defaultTemp = 0.7;
this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
}
log(text, ...args) {
console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
streamingEnabled() {
return "streamChat" in this && "streamGetChatCompletion" in this;
}
// Ensure the user set a value for the token limit
// and if undefined - assume 4096 window.
promptWindowLimit() {
const limit = process.env.GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT || 4096;
if (!limit || isNaN(Number(limit)))
throw new Error("No token context limit was set.");
return Number(limit);
}
// Short circuit since we have no idea if the model is valid or not
// in pre-flight for generic endpoints
isValidChatCompletionModel(_modelName = "") {
return true;
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
}
async isSafe(_input = "") {
// Not implemented so must be stubbed
return { safe: true, reasons: [] };
}
async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
const textResponse = await this.openai
.createChatCompletion({
model: this.model,
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
messages: await this.compressMessages(
{
systemPrompt: chatPrompt(workspace),
userPrompt: prompt,
chatHistory,
},
rawHistory
),
})
.then((json) => {
const res = json.data;
if (!res.hasOwnProperty("choices"))
throw new Error("GenericOpenAI chat: No results!");
if (res.choices.length === 0)
throw new Error("GenericOpenAI chat: No results length!");
return res.choices[0].message.content;
})
.catch((error) => {
throw new Error(
`GenericOpenAI::createChatCompletion failed with: ${error.message}`
);
});
return textResponse;
}
async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
const streamRequest = await this.openai.createChatCompletion(
{
model: this.model,
stream: true,
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
messages: await this.compressMessages(
{
systemPrompt: chatPrompt(workspace),
userPrompt: prompt,
chatHistory,
},
rawHistory
),
},
{ responseType: "stream" }
);
return streamRequest;
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
const { data } = await this.openai
.createChatCompletion({
model: this.model,
messages,
temperature,
})
.catch((e) => {
throw new Error(e.response.data.error.message);
});
if (!data.hasOwnProperty("choices")) return null;
return data.choices[0].message.content;
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
const streamRequest = await this.openai.createChatCompletion(
{
model: this.model,
stream: true,
messages,
temperature,
},
{ responseType: "stream" }
);
return streamRequest;
}
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponse(response, stream, responseProps);
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
module.exports = {
GenericOpenAiLLM,
};
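A hypothetical usage sketch (not part of the diff), assuming the GENERIC_OPEN_AI_* variables from the .env examples above are set; the require path matches the one used by getLLMProvider in the next file:

// Minimal driver for the new provider class; the constructor falls back
// to NativeEmbedder and GENERIC_OPEN_AI_MODEL_PREF when given nulls.
const { GenericOpenAiLLM } = require("../AiProviders/genericOpenAi");

const llm = new GenericOpenAiLLM(null, null);
llm
  .getChatCompletion([{ role: "user", content: "Hello!" }], {
    temperature: 0.7,
  })
  .then((text) => console.log(text));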

View File

@@ -77,8 +77,13 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "groq":
const { GroqLLM } = require("../AiProviders/groq");
return new GroqLLM(embedder, model);
case "generic-openai":
const { GenericOpenAiLLM } = require("../AiProviders/genericOpenAi");
return new GenericOpenAiLLM(embedder, model);
default:
throw new Error("ENV: No LLM_PROVIDER value found in environment!");
throw new Error(
`ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
);
}
}
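Putting the pieces together, a hypothetical call path once LLM_PROVIDER='generic-openai' is set (names taken from the diff; the surrounding wiring is assumed):

// getLLMProvider resolves the env selection to the new class.
const llm = getLLMProvider({ provider: "generic-openai", model: null });
// => new GenericOpenAiLLM(embedder, null); the model then falls back to
//    GENERIC_OPEN_AI_MODEL_PREF inside the constructor.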

View File

@@ -132,6 +132,24 @@ const KEY_MAPPING = {
checks: [nonZero],
},
// Generic OpenAI Inference Settings
GenericOpenAiBasePath: {
envKey: "GENERIC_OPEN_AI_BASE_PATH",
checks: [isValidURL],
},
GenericOpenAiModelPref: {
envKey: "GENERIC_OPEN_AI_MODEL_PREF",
checks: [isNotEmpty],
},
GenericOpenAiTokenLimit: {
envKey: "GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT",
checks: [nonZero],
},
GenericOpenAiKey: {
envKey: "GENERIC_OPEN_AI_API_KEY",
checks: [],
},
EmbeddingEngine: {
envKey: "EMBEDDING_ENGINE",
checks: [supportedEmbeddingModel],
@@ -375,6 +393,7 @@ function supportedLLM(input = "") {
"perplexity",
"openrouter",
"groq",
"generic-openai",
].includes(input);
return validSelection ? null : `${input} is not a valid LLM provider.`;
}
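The checks referenced in KEY_MAPPING (isValidURL, isNotEmpty, nonZero) are validators defined elsewhere in updateENV.js and not shown in this diff; a rough sketch of their assumed shape, where each returns null on success or an error string:

// Assumed validator shapes — illustrative, not the file's actual bodies.
function isValidURL(input = "") {
  try {
    new URL(input);
    return null;
  } catch {
    return "Not a valid URL.";
  }
}

function nonZero(input = "") {
  if (isNaN(Number(input)) || Number(input) <= 0)
    return "Value must be a number greater than zero.";
  return null;
}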