mirror of https://github.com/Mintplex-Labs/anything-llm.git
synced 2024-11-11 01:10:11 +01:00

More agent providers (#1316)

* add OpenRouter support
* add mistral agents
* add perplexity agents
* add textwebgenui agents

This commit is contained in:
parent 8422f92542
commit 81bc16cc39
5 .vscode/settings.json (vendored)
@@ -15,19 +15,24 @@
    "epub",
    "GROQ",
    "hljs",
    "huggingface",
    "inferencing",
    "koboldcpp",
    "Langchain",
    "lmstudio",
    "localai",
    "mbox",
    "Milvus",
    "Mintplex",
    "moderations",
    "Ollama",
    "Oobabooga",
    "openai",
    "opendocument",
    "openrouter",
    "Qdrant",
    "Serper",
    "textgenwebui",
    "togetherai",
    "vectordbs",
    "Weaviate",
@@ -15,6 +15,16 @@ const ENABLED_PROVIDERS = [
  "azure",
  "koboldcpp",
  "togetherai",
  "openrouter",
  "mistral",
  "perplexity",
  "textgenwebui",
  // TODO: More agent support.
  // "generic-openai", // Need to support text-input for agent model input for this to be enabled.
  // "cohere", // Has tool calling and will need to build explicit support.
  // "huggingface" // Can be done but already has issues with no chat template. Needs to be tested.
  // "gemini", // Too rate limited and broken in several ways to use for agents.
];
const WARN_PERFORMANCE = [
  "lmstudio",
@@ -23,6 +33,8 @@ const WARN_PERFORMANCE = [
  "koboldcpp",
  "ollama",
  "localai",
  "openrouter",
  "generic-openai",
];

const LLM_DEFAULT = {
@@ -753,6 +753,16 @@ ${this.getHistory({ to: route.to })
        return new Providers.KoboldCPPProvider({});
      case "localai":
        return new Providers.LocalAIProvider({ model: config.model });
      case "openrouter":
        return new Providers.OpenRouterProvider({ model: config.model });
      case "mistral":
        return new Providers.MistralProvider({ model: config.model });
      case "generic-openai":
        return new Providers.GenericOpenAiProvider({ model: config.model });
      case "perplexity":
        return new Providers.PerplexityProvider({ model: config.model });
      case "textgenwebui":
        return new Providers.TextWebGenUiProvider({});

      default:
        throw new Error(
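For orientation, a minimal sketch of how an instance produced by this switch is then driven. The enclosing method name is truncated out of this hunk, so the sketch constructs a provider directly; complete() and its { result, functionCall, cost } return shape come from the new provider files later in this diff, and the messages use the OpenAI chat shape these providers pass straight through.

// Sketch only: driving one of the providers constructed by the switch above.
// Assumes OPENROUTER_API_KEY is set and that this file sits next to providers/.
const Providers = require("./providers");

(async () => {
  const provider = new Providers.OpenRouterProvider({ model: "openrouter/auto" });

  // With no agent functions supplied, complete() falls through to a plain
  // chat completion against the provider's OpenAI-compatible endpoint.
  const { result, cost } = await provider.complete([
    { role: "user", content: "Say hello from the agent." },
  ]);
  console.log(result, cost); // assistant text (or null on API error), 0
})();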
115 server/utils/agents/aibitat/providers/genericOpenAi.js (new file)
@@ -0,0 +1,115 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");

/**
 * The provider for the Generic OpenAI provider.
 * Since we cannot promise the generic provider supports tool calling
 * (it almost certainly does not), we simply wrap it in UnTooled,
 * which often works far better anyway.
 */
class GenericOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
  model;

  constructor(config = {}) {
    super();
    const { model = "gpt-3.5-turbo" } = config;
    const client = new OpenAI({
      baseURL: process.env.GENERIC_OPEN_AI_BASE_PATH,
      apiKey: process.env.GENERIC_OPEN_AI_API_KEY ?? null,
      maxRetries: 3,
    });

    this._client = client;
    this.model = model;
    this.verbose = true;
  }

  get client() {
    return this._client;
  }

  async #handleFunctionCallChat({ messages = [] }) {
    return await this.client.chat.completions
      .create({
        model: this.model,
        temperature: 0,
        messages,
      })
      .then((result) => {
        if (!result.hasOwnProperty("choices"))
          throw new Error("Generic OpenAI chat: No results!");
        if (result.choices.length === 0)
          throw new Error("Generic OpenAI chat: No results length!");
        return result.choices[0].message.content;
      })
      .catch((_) => {
        return null;
      });
  }

  /**
   * Create a completion based on the received messages.
   *
   * @param messages A list of messages to send to the API.
   * @param functions An array of function definitions the agent may call.
   * @returns The completion.
   */
  async complete(messages, functions = []) {
    try {
      let completion;
      if (functions.length > 0) {
        const { toolCall, text } = await this.functionCall(
          messages,
          functions,
          this.#handleFunctionCallChat.bind(this)
        );

        if (toolCall !== null) {
          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
          return {
            result: null,
            functionCall: {
              name: toolCall.name,
              arguments: toolCall.arguments,
            },
            cost: 0,
          };
        }
        completion = { content: text };
      }

      if (!completion?.content) {
        this.providerLog(
          "Will assume chat completion without tool call inputs."
        );
        const response = await this.client.chat.completions.create({
          model: this.model,
          messages: this.cleanMsgs(messages),
        });
        completion = response.choices[0].message;
      }

      return {
        result: completion.content,
        cost: 0,
      };
    } catch (error) {
      throw error;
    }
  }

  /**
   * Get the cost of the completion.
   *
   * @param _usage The completion to get the cost for.
   * @returns The cost of the completion.
   */
  getCost(_usage) {
    return 0;
  }
}

module.exports = GenericOpenAiProvider;
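A minimal usage sketch for this provider, assuming some OpenAI-compatible server is reachable at the configured base path. The env var names are the ones read in the constructor above; the function-definition shape mirrors OpenAI-style tool schemas and is an assumption, since helpers/untooled.js is not part of this diff.

// Sketch: exercising GenericOpenAiProvider against a local OpenAI-compatible
// server. URL, key, and the get_time function are placeholders for illustration.
process.env.GENERIC_OPEN_AI_BASE_PATH = "http://localhost:8080/v1";
process.env.GENERIC_OPEN_AI_API_KEY = "sk-anything";

const GenericOpenAiProvider = require("./genericOpenAi.js");

(async () => {
  const provider = new GenericOpenAiProvider({ model: "gpt-3.5-turbo" });

  // UnTooled's functionCall() describes these definitions to the model as text
  // and parses the reply, since the generic endpoint likely lacks native tools.
  const functions = [
    {
      name: "get_time", // hypothetical tool, for illustration only
      description: "Returns the current server time.",
      parameters: { type: "object", properties: {} },
    },
  ];

  const { result, functionCall } = await provider.complete(
    [{ role: "user", content: "What time is it?" }],
    functions
  );
  // Either functionCall = { name, arguments } for the agent loop to execute,
  // or result carries a plain-text answer.
  console.log(functionCall ?? result);
})();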
@@ -4,6 +4,9 @@ const { RetryError } = require("../error.js");

/**
 * The provider for the Groq provider.
 * Using OpenAI tool calling with Groq really sucks right now; it's just fast
 * and bad. We should probably migrate this to UnTooled to improve coherence.
 */
class GroqProvider extends Provider {
  model;

@@ -7,6 +7,11 @@ const TogetherAIProvider = require("./togetherai.js");
const AzureOpenAiProvider = require("./azure.js");
const KoboldCPPProvider = require("./koboldcpp.js");
const LocalAIProvider = require("./localai.js");
const OpenRouterProvider = require("./openrouter.js");
const MistralProvider = require("./mistral.js");
const GenericOpenAiProvider = require("./genericOpenAi.js");
const PerplexityProvider = require("./perplexity.js");
const TextWebGenUiProvider = require("./textgenwebui.js");

module.exports = {
  OpenAIProvider,
@@ -18,4 +23,9 @@ module.exports = {
  AzureOpenAiProvider,
  KoboldCPPProvider,
  LocalAIProvider,
  OpenRouterProvider,
  MistralProvider,
  GenericOpenAiProvider,
  PerplexityProvider,
  TextWebGenUiProvider,
};
116 server/utils/agents/aibitat/providers/mistral.js (new file)
@@ -0,0 +1,116 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");

/**
 * The provider for the Mistral provider.
 * Mistral limits which models can call tools, and even then the model names
 * change and don't match the docs. When you do have the right model it still
 * fails and is not truly OpenAI compatible, so it's easier to just wrap this
 * with UnTooled, which works far more reliably since it's plain text.
 */
class MistralProvider extends InheritMultiple([Provider, UnTooled]) {
  model;

  constructor(config = {}) {
    super();
    const { model = "mistral-medium" } = config;
    const client = new OpenAI({
      baseURL: "https://api.mistral.ai/v1",
      apiKey: process.env.MISTRAL_API_KEY,
      maxRetries: 3,
    });

    this._client = client;
    this.model = model;
    this.verbose = true;
  }

  get client() {
    return this._client;
  }

  async #handleFunctionCallChat({ messages = [] }) {
    return await this.client.chat.completions
      .create({
        model: this.model,
        temperature: 0,
        messages,
      })
      .then((result) => {
        if (!result.hasOwnProperty("choices"))
          throw new Error("Mistral chat: No results!");
        if (result.choices.length === 0)
          throw new Error("Mistral chat: No results length!");
        return result.choices[0].message.content;
      })
      .catch((_) => {
        return null;
      });
  }

  /**
   * Create a completion based on the received messages.
   *
   * @param messages A list of messages to send to the API.
   * @param functions An array of function definitions the agent may call.
   * @returns The completion.
   */
  async complete(messages, functions = []) {
    try {
      let completion;
      if (functions.length > 0) {
        const { toolCall, text } = await this.functionCall(
          messages,
          functions,
          this.#handleFunctionCallChat.bind(this)
        );

        if (toolCall !== null) {
          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
          return {
            result: null,
            functionCall: {
              name: toolCall.name,
              arguments: toolCall.arguments,
            },
            cost: 0,
          };
        }
        completion = { content: text };
      }

      if (!completion?.content) {
        this.providerLog(
          "Will assume chat completion without tool call inputs."
        );
        const response = await this.client.chat.completions.create({
          model: this.model,
          messages: this.cleanMsgs(messages),
        });
        completion = response.choices[0].message;
      }

      return {
        result: completion.content,
        cost: 0,
      };
    } catch (error) {
      throw error;
    }
  }

  /**
   * Get the cost of the completion.
   *
   * @param _usage The completion to get the cost for.
   * @returns The cost of the completion.
   */
  getCost(_usage) {
    return 0;
  }
}

module.exports = MistralProvider;
117 server/utils/agents/aibitat/providers/openrouter.js (new file)
@@ -0,0 +1,117 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");

/**
 * The provider for the OpenRouter provider.
 */
class OpenRouterProvider extends InheritMultiple([Provider, UnTooled]) {
  model;

  constructor(config = {}) {
    super();
    const { model = "openrouter/auto" } = config;
    const client = new OpenAI({
      baseURL: "https://openrouter.ai/api/v1",
      apiKey: process.env.OPENROUTER_API_KEY,
      maxRetries: 3,
      defaultHeaders: {
        "HTTP-Referer": "https://useanything.com",
        "X-Title": "AnythingLLM",
      },
    });

    this._client = client;
    this.model = model;
    this.verbose = true;
  }

  get client() {
    return this._client;
  }

  async #handleFunctionCallChat({ messages = [] }) {
    return await this.client.chat.completions
      .create({
        model: this.model,
        temperature: 0,
        messages,
      })
      .then((result) => {
        if (!result.hasOwnProperty("choices"))
          throw new Error("OpenRouter chat: No results!");
        if (result.choices.length === 0)
          throw new Error("OpenRouter chat: No results length!");
        return result.choices[0].message.content;
      })
      .catch((_) => {
        return null;
      });
  }

  /**
   * Create a completion based on the received messages.
   *
   * @param messages A list of messages to send to the API.
   * @param functions An array of function definitions the agent may call.
   * @returns The completion.
   */
  async complete(messages, functions = []) {
    try {
      let completion;
      if (functions.length > 0) {
        const { toolCall, text } = await this.functionCall(
          messages,
          functions,
          this.#handleFunctionCallChat.bind(this)
        );

        if (toolCall !== null) {
          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
          return {
            result: null,
            functionCall: {
              name: toolCall.name,
              arguments: toolCall.arguments,
            },
            cost: 0,
          };
        }
        completion = { content: text };
      }

      if (!completion?.content) {
        this.providerLog(
          "Will assume chat completion without tool call inputs."
        );
        const response = await this.client.chat.completions.create({
          model: this.model,
          messages: this.cleanMsgs(messages),
        });
        completion = response.choices[0].message;
      }

      return {
        result: completion.content,
        cost: 0,
      };
    } catch (error) {
      throw error;
    }
  }

  /**
   * Get the cost of the completion.
   *
   * @param _usage The completion to get the cost for.
   * @returns The cost of the completion.
   * Stubbed since OpenRouter has no cost basis.
   */
  getCost(_usage) {
    return 0;
  }
}

module.exports = OpenRouterProvider;
112 server/utils/agents/aibitat/providers/perplexity.js (new file)
@@ -0,0 +1,112 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");

/**
 * The provider for the Perplexity provider.
 */
class PerplexityProvider extends InheritMultiple([Provider, UnTooled]) {
  model;

  constructor(config = {}) {
    super();
    const { model = "sonar-small-online" } = config;
    const client = new OpenAI({
      baseURL: "https://api.perplexity.ai",
      apiKey: process.env.PERPLEXITY_API_KEY ?? null,
      maxRetries: 3,
    });

    this._client = client;
    this.model = model;
    this.verbose = true;
  }

  get client() {
    return this._client;
  }

  async #handleFunctionCallChat({ messages = [] }) {
    return await this.client.chat.completions
      .create({
        model: this.model,
        temperature: 0,
        messages,
      })
      .then((result) => {
        if (!result.hasOwnProperty("choices"))
          throw new Error("Perplexity chat: No results!");
        if (result.choices.length === 0)
          throw new Error("Perplexity chat: No results length!");
        return result.choices[0].message.content;
      })
      .catch((_) => {
        return null;
      });
  }

  /**
   * Create a completion based on the received messages.
   *
   * @param messages A list of messages to send to the API.
   * @param functions An array of function definitions the agent may call.
   * @returns The completion.
   */
  async complete(messages, functions = []) {
    try {
      let completion;
      if (functions.length > 0) {
        const { toolCall, text } = await this.functionCall(
          messages,
          functions,
          this.#handleFunctionCallChat.bind(this)
        );

        if (toolCall !== null) {
          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
          return {
            result: null,
            functionCall: {
              name: toolCall.name,
              arguments: toolCall.arguments,
            },
            cost: 0,
          };
        }
        completion = { content: text };
      }

      if (!completion?.content) {
        this.providerLog(
          "Will assume chat completion without tool call inputs."
        );
        const response = await this.client.chat.completions.create({
          model: this.model,
          messages: this.cleanMsgs(messages),
        });
        completion = response.choices[0].message;
      }

      return {
        result: completion.content,
        cost: 0,
      };
    } catch (error) {
      throw error;
    }
  }

  /**
   * Get the cost of the completion.
   *
   * @param _usage The completion to get the cost for.
   * @returns The cost of the completion.
   */
  getCost(_usage) {
    return 0;
  }
}

module.exports = PerplexityProvider;
112 server/utils/agents/aibitat/providers/textgenwebui.js (new file)
@@ -0,0 +1,112 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");

/**
 * The provider for the Oobabooga provider.
 */
class TextWebGenUiProvider extends InheritMultiple([Provider, UnTooled]) {
  model;

  constructor(_config = {}) {
    super();
    const client = new OpenAI({
      baseURL: process.env.TEXT_GEN_WEB_UI_BASE_PATH,
      apiKey: null,
      maxRetries: 3,
    });

    this._client = client;
    this.model = null; // text-generation-webui does not have a model pref.
    this.verbose = true;
  }

  get client() {
    return this._client;
  }

  async #handleFunctionCallChat({ messages = [] }) {
    return await this.client.chat.completions
      .create({
        model: this.model,
        temperature: 0,
        messages,
      })
      .then((result) => {
        if (!result.hasOwnProperty("choices"))
          throw new Error("Oobabooga chat: No results!");
        if (result.choices.length === 0)
          throw new Error("Oobabooga chat: No results length!");
        return result.choices[0].message.content;
      })
      .catch((_) => {
        return null;
      });
  }

  /**
   * Create a completion based on the received messages.
   *
   * @param messages A list of messages to send to the API.
   * @param functions An array of function definitions the agent may call.
   * @returns The completion.
   */
  async complete(messages, functions = []) {
    try {
      let completion;
      if (functions.length > 0) {
        const { toolCall, text } = await this.functionCall(
          messages,
          functions,
          this.#handleFunctionCallChat.bind(this)
        );

        if (toolCall !== null) {
          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
          return {
            result: null,
            functionCall: {
              name: toolCall.name,
              arguments: toolCall.arguments,
            },
            cost: 0,
          };
        }
        completion = { content: text };
      }

      if (!completion?.content) {
        this.providerLog(
          "Will assume chat completion without tool call inputs."
        );
        const response = await this.client.chat.completions.create({
          model: this.model,
          messages: this.cleanMsgs(messages),
        });
        completion = response.choices[0].message;
      }

      return {
        result: completion.content,
        cost: 0,
      };
    } catch (error) {
      throw error;
    }
  }

  /**
   * Get the cost of the completion.
   *
   * @param _usage The completion to get the cost for.
   * @returns The cost of the completion.
   * Stubbed since text-generation-webui has no cost basis.
   */
  getCost(_usage) {
    return 0;
  }
}

module.exports = TextWebGenUiProvider;
@@ -115,6 +115,29 @@ class AgentHandler {
        if (!process.env.GEMINI_API_KEY)
          throw new Error("Gemini API key must be provided to use agents.");
        break;
      case "openrouter":
        if (!process.env.OPENROUTER_API_KEY)
          throw new Error("OpenRouter API key must be provided to use agents.");
        break;
      case "mistral":
        if (!process.env.MISTRAL_API_KEY)
          throw new Error("Mistral API key must be provided to use agents.");
        break;
      case "generic-openai":
        if (!process.env.GENERIC_OPEN_AI_BASE_PATH)
          throw new Error("API base path must be provided to use agents.");
        break;
      case "perplexity":
        if (!process.env.PERPLEXITY_API_KEY)
          throw new Error("Perplexity API key must be provided to use agents.");
        break;
      case "textgenwebui":
        if (!process.env.TEXT_GEN_WEB_UI_BASE_PATH)
          throw new Error(
            "TextWebGenUI API base path must be provided to use agents."
          );
        break;

      default:
        throw new Error("No provider found to power agent cluster.");
    }
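Taken together, these checks imply the following server .env entries for the new providers. The variable names are verbatim from this hunk and the provider constructors above; the values are placeholders, not defaults shipped by AnythingLLM.

# Sketch of the env vars gating the new agent providers (placeholder values).
OPENROUTER_API_KEY=sk-or-v1-...
MISTRAL_API_KEY=...
PERPLEXITY_API_KEY=pplx-...
GENERIC_OPEN_AI_BASE_PATH=http://localhost:8080/v1   # any OpenAI-compatible server
GENERIC_OPEN_AI_API_KEY=...                          # optional; read with ?? null
TEXT_GEN_WEB_UI_BASE_PATH=http://127.0.0.1:5000/v1   # text-generation-webui endpoint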
@@ -142,6 +165,16 @@ class AgentHandler {
        return "gemini-pro";
      case "localai":
        return null;
      case "openrouter":
        return "openrouter/auto";
      case "mistral":
        return "mistral-medium";
      case "generic-openai":
        return "gpt-3.5-turbo";
      case "perplexity":
        return "sonar-small-online";
      case "textgenwebui":
        return null;
      default:
        return "unknown";
    }