/**
 * A service that provides an AI client to create a completion.
 */

/**
 * @typedef {Object} LangChainModelConfig
 * @property {(string|null)} baseURL - Override the default process.env base URL for this provider.
 * @property {(string|null)} apiKey - Override the default process.env API key for this provider.
 * @property {(number|null)} temperature - Override the default temperature.
 * @property {(string|null)} model - Override the model used by this provider.
 */
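
// Example (illustrative) of a LangChainModelConfig. The values below are
// assumptions for demonstration only, not defaults read from this file:
//   const exampleConfig = { baseURL: null, apiKey: null, temperature: 0.7, model: "gpt-4o" };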

const { ChatOpenAI } = require("@langchain/openai");
const { ChatAnthropic } = require("@langchain/anthropic");
const { ChatBedrockConverse } = require("@langchain/aws");
const { ChatOllama } = require("@langchain/community/chat_models/ollama");
const { toValidNumber } = require("../../../http");
const { getLLMProviderClass } = require("../../../helpers");

const DEFAULT_WORKSPACE_PROMPT =
  "You are a helpful AI assistant who can assist the user and use the tools available to help answer the user's prompts and questions.";

class Provider {
  _client;

  constructor(client) {
    // Provider is an abstract base class; only subclasses attach a client.
    if (this.constructor === Provider) {
      return;
    }
    this._client = client;
  }

  // Logs with a cyan "[AgentLLM - <model>]" prefix so agent LLM output is
  // easy to spot in the console.
  providerLog(text, ...args) {
    console.log(
      `\x1b[36m[AgentLLM${this?.model ? ` - ${this.model}` : ""}]\x1b[0m ${text}`,
      ...args
    );
  }

  get client() {
    return this._client;
  }

  /**
   * Build a LangChain chat model instance for the given provider.
   * @param {string} provider - The string key of the provider LLM being loaded.
   * @param {LangChainModelConfig} config - Config used to override the default connection object.
   * @returns A LangChain chat model instance for the provider.
   */
  static LangChainChatModel(provider = "openai", config = {}) {
    switch (provider) {
      // Cloud models
      case "openai":
        return new ChatOpenAI({
          apiKey: process.env.OPEN_AI_KEY,
          ...config,
        });
      case "anthropic":
        return new ChatAnthropic({
          apiKey: process.env.ANTHROPIC_API_KEY,
          ...config,
        });
      case "groq":
        return new ChatOpenAI({
          configuration: {
            baseURL: "https://api.groq.com/openai/v1",
          },
          apiKey: process.env.GROQ_API_KEY,
          ...config,
        });
      case "mistral":
        return new ChatOpenAI({
          configuration: {
            baseURL: "https://api.mistral.ai/v1",
          },
          apiKey: process.env.MISTRAL_API_KEY ?? null,
          ...config,
        });
      case "openrouter":
        return new ChatOpenAI({
          configuration: {
            baseURL: "https://openrouter.ai/api/v1",
            defaultHeaders: {
              "HTTP-Referer": "https://anythingllm.com",
              "X-Title": "AnythingLLM",
            },
          },
          apiKey: process.env.OPENROUTER_API_KEY ?? null,
          ...config,
        });
      case "perplexity":
        return new ChatOpenAI({
          configuration: {
            baseURL: "https://api.perplexity.ai",
          },
          apiKey: process.env.PERPLEXITY_API_KEY ?? null,
          ...config,
        });
      case "togetherai":
        return new ChatOpenAI({
          configuration: {
            baseURL: "https://api.together.xyz/v1",
          },
          apiKey: process.env.TOGETHER_AI_API_KEY ?? null,
          ...config,
        });
      case "generic-openai":
        return new ChatOpenAI({
          configuration: {
            baseURL: process.env.GENERIC_OPEN_AI_BASE_PATH,
          },
          apiKey: process.env.GENERIC_OPEN_AI_API_KEY,
          maxTokens: toValidNumber(
            process.env.GENERIC_OPEN_AI_MAX_TOKENS,
            1024
          ),
          ...config,
        });
      case "bedrock":
        return new ChatBedrockConverse({
          model: process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE,
          region: process.env.AWS_BEDROCK_LLM_REGION,
          credentials: {
            accessKeyId: process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID,
            secretAccessKey: process.env.AWS_BEDROCK_LLM_ACCESS_KEY,
          },
          ...config,
        });
      case "fireworksai":
        return new ChatOpenAI({
          apiKey: process.env.FIREWORKS_AI_LLM_API_KEY,
          ...config,
        });
      case "apipie":
        return new ChatOpenAI({
          configuration: {
            baseURL: "https://apipie.ai/v1",
          },
          apiKey: process.env.APIPIE_LLM_API_KEY ?? null,
          ...config,
        });
      case "deepseek":
        return new ChatOpenAI({
          configuration: {
            baseURL: "https://api.deepseek.com/v1",
          },
          apiKey: process.env.DEEPSEEK_API_KEY ?? null,
          ...config,
        });
      case "xai":
        return new ChatOpenAI({
          configuration: {
            baseURL: "https://api.x.ai/v1",
          },
          apiKey: process.env.XAI_LLM_API_KEY ?? null,
          ...config,
        });

      // OSS Model Runners
      // case "anythingllm_ollama":
      //   return new ChatOllama({
      //     baseUrl: process.env.PLACEHOLDER,
      //     ...config,
      //   });
      case "ollama":
        return new ChatOllama({
          baseUrl: process.env.OLLAMA_BASE_PATH,
          ...config,
        });
      case "lmstudio":
        return new ChatOpenAI({
          configuration: {
            baseURL: process.env.LMSTUDIO_BASE_PATH?.replace(/\/+$/, ""),
          },
          apiKey: "not-used", // Needs to be specified or else will assume OpenAI
          ...config,
        });
      case "koboldcpp":
        return new ChatOpenAI({
          configuration: {
            baseURL: process.env.KOBOLD_CPP_BASE_PATH,
          },
          apiKey: "not-used",
          ...config,
        });
      case "localai":
        return new ChatOpenAI({
          configuration: {
            baseURL: process.env.LOCAL_AI_BASE_PATH,
          },
          apiKey: process.env.LOCAL_AI_API_KEY ?? "not-used",
          ...config,
        });
      case "textgenwebui":
        return new ChatOpenAI({
          configuration: {
            baseURL: process.env.TEXT_GEN_WEB_UI_BASE_PATH,
          },
          apiKey: process.env.TEXT_GEN_WEB_UI_API_KEY ?? "not-used",
          ...config,
        });
      case "litellm":
        return new ChatOpenAI({
          configuration: {
            baseURL: process.env.LITE_LLM_BASE_PATH,
          },
          apiKey: process.env.LITE_LLM_API_KEY ?? null,
          ...config,
        });
      case "novita":
        return new ChatOpenAI({
          configuration: {
            baseURL: "https://api.novita.ai/v3/openai",
          },
          apiKey: process.env.NOVITA_LLM_API_KEY ?? null,
          ...config,
        });

      default:
        throw new Error(`Unsupported provider ${provider} for this task.`);
    }
  }
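
  // Usage sketch (illustrative): the provider key and model name below are
  // assumptions for demonstration, not values read from this file.
  //   const chat = Provider.LangChainChatModel("groq", { model: "llama3-70b-8192", temperature: 0 });
  //   const response = await chat.invoke("Hello!");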

  /**
   * Get the context limit for a provider/model combination using the static
   * promptWindowLimit method on the AIProvider class.
   * @param {string} provider
   * @param {string} modelName
   * @returns {number}
   */
  static contextLimit(provider = "openai", modelName) {
    const llm = getLLMProviderClass({ provider });
    if (!llm || !llm.hasOwnProperty("promptWindowLimit")) return 8_000;
    return llm.promptWindowLimit(modelName);
  }
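
  // Usage sketch (the model name is illustrative):
  //   Provider.contextLimit("openai", "gpt-4o"); // -> the model's prompt window, or 8_000 if unknown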

  // For some providers we may want to override the system prompt to be more
  // verbose. Currently we only do this for LMStudio, but we will probably
  // want to expand this to any Untooled LLM.
  static systemPrompt(provider = null) {
    switch (provider) {
      case "lmstudio":
        return "You are a helpful AI assistant who can assist the user and use the tools available to help answer the user's prompts and questions. Tools will be handled by another assistant and you will simply receive their responses to help answer the user's prompt - always try to answer the user's prompt the best you can with the context available to you and your general knowledge.";
      default:
        return DEFAULT_WORKSPACE_PROMPT;
    }
  }
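
  // Usage sketch:
  //   Provider.systemPrompt("lmstudio"); // -> verbose prompt for Untooled models
  //   Provider.systemPrompt(); // -> DEFAULT_WORKSPACE_PROMPT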
}
module.exports = Provider;