Mirror of https://github.com/Mintplex-Labs/anything-llm.git (synced 2024-11-19 12:40:09 +01:00)
Support DeepSeek (#2377)

* add deepseek support
* lint
* update deepseek context length
* add deepseek to onboarding

Co-authored-by: Timothy Carambat <rambat1010@gmail.com>

Parent: 30645831a1
Commit: 7390bae6f6
@@ -87,6 +87,7 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
 - [Fireworks AI (chat models)](https://fireworks.ai/)
 - [Perplexity (chat models)](https://www.perplexity.ai/)
 - [OpenRouter (chat models)](https://openrouter.ai/)
+- [DeepSeek (chat models)](https://deepseek.com/)
 - [Mistral](https://mistral.ai/)
 - [Groq](https://groq.com/)
 - [Cohere](https://cohere.com/)
frontend/src/components/LLMSelection/DeepSeekOptions/index.jsx (new file, 100 lines)
@@ -0,0 +1,100 @@
import { useState, useEffect } from "react";
import System from "@/models/system";

export default function DeepSeekOptions({ settings }) {
  const [inputValue, setInputValue] = useState(settings?.DeepSeekApiKey);
  const [deepSeekApiKey, setDeepSeekApiKey] = useState(
    settings?.DeepSeekApiKey
  );

  return (
    <div className="flex gap-[36px] mt-1.5">
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-3">
          API Key
        </label>
        <input
          type="password"
          name="DeepSeekApiKey"
          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
          placeholder="DeepSeek API Key"
          defaultValue={settings?.DeepSeekApiKey ? "*".repeat(20) : ""}
          required={true}
          autoComplete="off"
          spellCheck={false}
          onChange={(e) => setInputValue(e.target.value)}
          onBlur={() => setDeepSeekApiKey(inputValue)}
        />
      </div>
      {!settings?.credentialsOnly && (
        <DeepSeekModelSelection settings={settings} apiKey={deepSeekApiKey} />
      )}
    </div>
  );
}

function DeepSeekModelSelection({ apiKey, settings }) {
  const [models, setModels] = useState([]);
  const [loading, setLoading] = useState(true);

  useEffect(() => {
    async function findCustomModels() {
      if (!apiKey) {
        setModels([]);
        setLoading(true);
        return;
      }

      setLoading(true);
      const { models } = await System.customModels(
        "deepseek",
        typeof apiKey === "boolean" ? null : apiKey
      );
      setModels(models || []);
      setLoading(false);
    }
    findCustomModels();
  }, [apiKey]);

  if (loading) {
    return (
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-3">
          Chat Model Selection
        </label>
        <select
          name="DeepSeekModelPref"
          disabled={true}
          className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
        >
          <option disabled={true} selected={true}>
            -- loading available models --
          </option>
        </select>
      </div>
    );
  }

  return (
    <div className="flex flex-col w-60">
      <label className="text-white text-sm font-semibold block mb-3">
        Chat Model Selection
      </label>
      <select
        name="DeepSeekModelPref"
        required={true}
        className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
      >
        {models.map((model) => (
          <option
            key={model.id}
            value={model.id}
            selected={settings?.DeepSeekModelPref === model.id}
          >
            {model.name}
          </option>
        ))}
      </select>
    </div>
  );
}
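A note on the `typeof apiKey === "boolean"` guard above: the server's settings payload (see the SystemSettings hunk below) exposes `DeepSeekApiKey` as `!!process.env.DEEPSEEK_API_KEY`, a boolean rather than the secret itself, so on first render the component may hold `true` instead of a usable key and must fall back to `null` when fetching models.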
frontend/src/media/llmprovider/deepseek.png (new binary file, 30 KiB; contents not shown)
@@ -25,6 +25,7 @@ import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
+import DeepSeekLogo from "@/media/llmprovider/deepseek.png";

 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -48,6 +49,7 @@ import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
 import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
 import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
+import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";

 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -211,6 +213,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
     description: "Run LiteLLM's OpenAI compatible proxy for various LLMs.",
     requiredConfig: ["LiteLLMBasePath"],
   },
+  {
+    name: "DeepSeek",
+    value: "deepseek",
+    logo: DeepSeekLogo,
+    options: (settings) => <DeepSeekOptions settings={settings} />,
+    description: "Run DeepSeek's powerful LLMs.",
+    requiredConfig: ["DeepSeekApiKey"],
+  },
   {
     name: "Generic OpenAI",
     value: "generic-openai",
@@ -20,6 +20,7 @@ import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
+import DeepSeekLogo from "@/media/llmprovider/deepseek.png";

 import CohereLogo from "@/media/llmprovider/cohere.png";
 import ZillizLogo from "@/media/vectordbs/zilliz.png";
@@ -196,6 +197,11 @@ export const LLM_SELECTION_PRIVACY = {
     ],
     logo: AWSBedrockLogo,
   },
+  deepseek: {
+    name: "DeepSeek",
+    description: ["Your model and chat contents are visible to DeepSeek"],
+    logo: DeepSeekLogo,
+  },
 };

 export const VECTOR_DB_PRIVACY = {
@@ -20,6 +20,7 @@ import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
+import DeepSeekLogo from "@/media/llmprovider/deepseek.png";

 import CohereLogo from "@/media/llmprovider/cohere.png";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -43,6 +44,7 @@ import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
 import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
 import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
+import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";

 import LLMItem from "@/components/LLMSelection/LLMItem";
 import System from "@/models/system";
@@ -186,6 +188,13 @@ const LLMS = [
     options: (settings) => <LiteLLMOptions settings={settings} />,
     description: "Run LiteLLM's OpenAI compatible proxy for various LLMs.",
   },
+  {
+    name: "DeepSeek",
+    value: "deepseek",
+    logo: DeepSeekLogo,
+    options: (settings) => <DeepSeekOptions settings={settings} />,
+    description: "Run DeepSeek's powerful LLMs.",
+  },
   {
     name: "Generic OpenAI",
     value: "generic-openai",
@@ -23,6 +23,7 @@ const ENABLED_PROVIDERS = [
   "generic-openai",
   "bedrock",
   "fireworksai",
+  "deepseek",
   // TODO: More agent support.
   // "cohere", // Has tool calling and will need to build explicit support
   // "huggingface" // Can be done but already has issues with no-chat templated. Needs to be tested.
@@ -506,6 +506,10 @@ const SystemSettings = {

       // VoyageAi API Keys
       VoyageAiApiKey: !!process.env.VOYAGEAI_API_KEY,
+
+      // DeepSeek API Keys
+      DeepSeekApiKey: !!process.env.DEEPSEEK_API_KEY,
+      DeepSeekModelPref: process.env.DEEPSEEK_MODEL_PREF,
     };
   },
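A minimal runnable sketch (hypothetical env values) of what this fragment contributes to the settings payload; the `!!` means only the key's presence is exposed to the client:

// Hypothetical values; the real key never leaves the server.
process.env.DEEPSEEK_API_KEY = "sk-example";
process.env.DEEPSEEK_MODEL_PREF = "deepseek-chat";
console.log({
  DeepSeekApiKey: !!process.env.DEEPSEEK_API_KEY, // true (boolean flag only)
  DeepSeekModelPref: process.env.DEEPSEEK_MODEL_PREF, // "deepseek-chat"
});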
server/utils/AiProviders/deepseek/index.js (new file, 127 lines)
@@ -0,0 +1,127 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
const { MODEL_MAP } = require("../modelMap");

class DeepSeekLLM {
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.DEEPSEEK_API_KEY)
      throw new Error("No DeepSeek API key was set.");
    const { OpenAI: OpenAIApi } = require("openai");

    this.openai = new OpenAIApi({
      apiKey: process.env.DEEPSEEK_API_KEY,
      baseURL: "https://api.deepseek.com/v1",
    });
    this.model =
      modelPreference || process.env.DEEPSEEK_MODEL_PREF || "deepseek-chat";
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
  }

  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  static promptWindowLimit(modelName) {
    return MODEL_MAP.deepseek[modelName] ?? 8192;
  }

  promptWindowLimit() {
    return MODEL_MAP.deepseek[this.model] ?? 8192;
  }

  async isValidChatCompletionModel(modelName = "") {
    const models = await this.openai.models.list().catch(() => ({ data: [] }));
    return models.data.some((model) => model.id === modelName);
  }

  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
  }

  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `DeepSeek chat: ${this.model} is not valid for chat completion!`
      );

    const result = await this.openai.chat.completions
      .create({
        model: this.model,
        messages,
        temperature,
      })
      .catch((e) => {
        throw new Error(e.message);
      });

    if (!result.hasOwnProperty("choices") || result.choices.length === 0)
      return null;
    return result.choices[0].message.content;
  }

  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `DeepSeek chat: ${this.model} is not valid for chat completion!`
      );

    const streamRequest = await this.openai.chat.completions.create({
      model: this.model,
      stream: true,
      messages,
      temperature,
    });
    return streamRequest;
  }

  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }

  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}

module.exports = {
  DeepSeekLLM,
};
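A worked sketch of the `limits` split in the constructor above, using the 128,000-token window that the MODEL_MAP hunk below assigns to `deepseek-chat`:

// 15% history / 15% system / 70% user of deepseek-chat's 128,000-token window:
const window = 128_000;
const limits = {
  history: window * 0.15, // 19,200 tokens
  system: window * 0.15, // 19,200 tokens
  user: window * 0.7, // 89,600 tokens
};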
@@ -53,6 +53,10 @@ const MODEL_MAP = {
     "gpt-4": 8_192,
     "gpt-4-32k": 32_000,
   },
+  deepseek: {
+    "deepseek-chat": 128_000,
+    "deepseek-coder": 128_000,
+  },
 };

 module.exports = { MODEL_MAP };
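And a small sketch of the lookup path, showing the 8192-token fallback that `promptWindowLimit()` applies to models missing from the map (require path inferred from the `require("../modelMap")` in the provider above):

const { MODEL_MAP } = require("./server/utils/AiProviders/modelMap");

const windowFor = (model) => MODEL_MAP.deepseek[model] ?? 8192;
console.log(windowFor("deepseek-chat")); // 128000
console.log(windowFor("some-unlisted-model")); // 8192 (fallback)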
@@ -783,6 +783,8 @@ ${this.getHistory({ to: route.to })
         return new Providers.AWSBedrockProvider({});
       case "fireworksai":
         return new Providers.FireworksAIProvider({ model: config.model });
+      case "deepseek":
+        return new Providers.DeepSeekProvider({ model: config.model });

       default:
         throw new Error(
@@ -174,6 +174,14 @@ class Provider {
         apiKey: process.env.TEXT_GEN_WEB_UI_API_KEY ?? "not-used",
         ...config,
       });
+    case "deepseek":
+      return new ChatOpenAI({
+        configuration: {
+          baseURL: "https://api.deepseek.com/v1",
+        },
+        apiKey: process.env.DEEPSEEK_API_KEY ?? null,
+        ...config,
+      });
     default:
       throw new Error(`Unsupported provider ${provider} for this task.`);
   }
server/utils/agents/aibitat/providers/deepseek.js (new file, 118 lines)
@@ -0,0 +1,118 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
const { toValidNumber } = require("../../../http/index.js");

class DeepSeekProvider extends InheritMultiple([Provider, UnTooled]) {
  model;

  constructor(config = {}) {
    super();
    const { model = "deepseek-chat" } = config;
    const client = new OpenAI({
      baseURL: "https://api.deepseek.com/v1",
      apiKey: process.env.DEEPSEEK_API_KEY ?? null,
      maxRetries: 3,
    });

    this._client = client;
    this.model = model;
    this.verbose = true;
    this.maxTokens = process.env.DEEPSEEK_MAX_TOKENS
      ? toValidNumber(process.env.DEEPSEEK_MAX_TOKENS, 1024)
      : 1024;
  }

  get client() {
    return this._client;
  }

  async #handleFunctionCallChat({ messages = [] }) {
    return await this.client.chat.completions
      .create({
        model: this.model,
        temperature: 0,
        messages,
        max_tokens: this.maxTokens,
      })
      .then((result) => {
        if (!result.hasOwnProperty("choices"))
          throw new Error("DeepSeek chat: No results!");
        if (result.choices.length === 0)
          throw new Error("DeepSeek chat: No results length!");
        return result.choices[0].message.content;
      })
      .catch((_) => {
        return null;
      });
  }

  /**
   * Create a completion based on the received messages.
   *
   * @param messages A list of messages to send to the API.
   * @param functions
   * @returns The completion.
   */
  async complete(messages, functions = null) {
    try {
      let completion;
      if (functions.length > 0) {
        const { toolCall, text } = await this.functionCall(
          messages,
          functions,
          this.#handleFunctionCallChat.bind(this)
        );

        if (toolCall !== null) {
          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
          return {
            result: null,
            functionCall: {
              name: toolCall.name,
              arguments: toolCall.arguments,
            },
            cost: 0,
          };
        }
        completion = { content: text };
      }

      if (!completion?.content) {
        this.providerLog(
          "Will assume chat completion without tool call inputs."
        );
        const response = await this.client.chat.completions.create({
          model: this.model,
          messages: this.cleanMsgs(messages),
        });
        completion = response.choices[0].message;
      }

      // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
      // from calling the exact same function over and over in a loop within a single chat exchange
      // _but_ we should enable it to call previously used tools in a new chat interaction.
      this.deduplicator.reset("runs");
      return {
        result: completion.content,
        cost: 0,
      };
    } catch (error) {
      throw error;
    }
  }

  /**
   * Get the cost of the completion.
   *
   * @param _usage The completion to get the cost for.
   * @returns The cost of the completion.
   */
  getCost(_usage) {
    return 0;
  }
}

module.exports = DeepSeekProvider;
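Worth noting: this provider inherits `UnTooled`, so agent tool use appears to be emulated in-prompt (DeepSeek is driven as a plain OpenAI-compatible chat API, hence `temperature: 0` on the function-call pass) rather than via native function calling, and completion length is capped by the optional `DEEPSEEK_MAX_TOKENS` env var with a 1024-token default.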
@@ -14,6 +14,7 @@ const PerplexityProvider = require("./perplexity.js");
 const TextWebGenUiProvider = require("./textgenwebui.js");
 const AWSBedrockProvider = require("./bedrock.js");
 const FireworksAIProvider = require("./fireworksai.js");
+const DeepSeekProvider = require("./deepseek.js");

 module.exports = {
   OpenAIProvider,
@@ -28,6 +29,7 @@ module.exports = {
   OpenRouterProvider,
   MistralProvider,
   GenericOpenAiProvider,
+  DeepSeekProvider,
   PerplexityProvider,
   TextWebGenUiProvider,
   AWSBedrockProvider,
@@ -162,6 +162,10 @@ class AgentHandler {
           "FireworksAI API Key must be provided to use agents."
         );
         break;
+      case "deepseek":
+        if (!process.env.DEEPSEEK_API_KEY)
+          throw new Error("DeepSeek API Key must be provided to use agents.");
+        break;

       default:
         throw new Error(
@@ -206,6 +210,8 @@ class AgentHandler {
         return null;
       case "fireworksai":
         return null;
+      case "deepseek":
+        return "deepseek-chat";
       default:
         return "unknown";
     }
@@ -18,6 +18,7 @@ const SUPPORT_CUSTOM_MODELS = [
   "litellm",
   "elevenlabs-tts",
   "groq",
+  "deepseek",
 ];

 async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -53,6 +54,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
       return await getElevenLabsModels(apiKey);
     case "groq":
       return await getGroqAiModels(apiKey);
+    case "deepseek":
+      return await getDeepSeekModels(apiKey);
     default:
       return { models: [], error: "Invalid provider for custom models" };
   }
@@ -419,6 +422,31 @@ async function getElevenLabsModels(apiKey = null) {
   return { models, error: null };
 }

+async function getDeepSeekModels(apiKey = null) {
+  const { OpenAI: OpenAIApi } = require("openai");
+  const openai = new OpenAIApi({
+    apiKey: apiKey || process.env.DEEPSEEK_API_KEY,
+    baseURL: "https://api.deepseek.com/v1",
+  });
+  const models = await openai.models
+    .list()
+    .then((results) => results.data)
+    .then((models) =>
+      models.map((model) => ({
+        id: model.id,
+        name: model.id,
+        organization: model.owned_by,
+      }))
+    )
+    .catch((e) => {
+      console.error(`DeepSeek:listModels`, e.message);
+      return [];
+    });
+
+  if (models.length > 0 && !!apiKey) process.env.DEEPSEEK_API_KEY = apiKey;
+  return { models, error: null };
+}
+
 module.exports = {
   getCustomModels,
 };
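A hedged usage sketch: `System.customModels("deepseek", apiKey)` in the options component above ultimately reaches this helper. Called directly it would look roughly like this (key hypothetical, require path assumed from the repo layout):

const { getCustomModels } = require("./server/utils/helpers/customModels"); // path assumed

(async () => {
  const { models, error } = await getCustomModels("deepseek", "sk-example");
  if (!error) console.log(models); // [{ id, name, organization }, ...]
})();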
@@ -159,6 +159,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
     case "bedrock":
       const { AWSBedrockLLM } = require("../AiProviders/bedrock");
       return new AWSBedrockLLM(embedder, model);
+    case "deepseek":
+      const { DeepSeekLLM } = require("../AiProviders/deepseek");
+      return new DeepSeekLLM(embedder, model);
     default:
       throw new Error(
         `ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
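Tying it together, a minimal sketch of selecting the new provider at runtime (env value hypothetical; `getLLMProvider`'s export path assumed to be `server/utils/helpers`):

process.env.DEEPSEEK_API_KEY = "sk-example"; // hypothetical key
const { getLLMProvider } = require("./server/utils/helpers"); // path assumed

const llm = getLLMProvider({ provider: "deepseek", model: "deepseek-chat" });
console.log(llm.promptWindowLimit()); // 128000, via MODEL_MAP.deepseek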
@@ -501,6 +501,16 @@ const KEY_MAPPING = {
     envKey: "TTS_PIPER_VOICE_MODEL",
     checks: [],
   },
+
+  // DeepSeek Options
+  DeepSeekApiKey: {
+    envKey: "DEEPSEEK_API_KEY",
+    checks: [isNotEmpty],
+  },
+  DeepSeekModelPref: {
+    envKey: "DEEPSEEK_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
 };

 function isNotEmpty(input = "") {
@@ -602,6 +612,7 @@ function supportedLLM(input = "") {
     "litellm",
     "generic-openai",
     "bedrock",
+    "deepseek",
   ].includes(input);
   return validSelection ? null : `${input} is not a valid LLM provider.`;
 }