Mirror of https://github.com/Mintplex-Labs/anything-llm.git (synced 2024-11-13 02:00:10 +01:00)
persist provider keys between toggle providers (#1041)

parent 5ef537364e
commit 8c7379cda1
@@ -33,236 +33,133 @@ const SystemSettings = {
     const llmProvider = process.env.LLM_PROVIDER;
     const vectorDB = process.env.VECTOR_DB;
     return {
+      // --------------------------------------------------------
+      // General Settings
+      // --------------------------------------------------------
       RequiresAuth: !!process.env.AUTH_TOKEN,
       AuthToken: !!process.env.AUTH_TOKEN,
       JWTSecret: !!process.env.JWT_SECRET,
       StorageDir: process.env.STORAGE_DIR,
       MultiUserMode: await this.isMultiUserMode(),
-      VectorDB: vectorDB,
-      HasExistingEmbeddings: await this.hasEmbeddings(),
+      DisableTelemetry: process.env.DISABLE_TELEMETRY || "false",
+
+      // --------------------------------------------------------
+      // Embedder Provider Selection Settings & Configs
+      // --------------------------------------------------------
       EmbeddingEngine: process.env.EMBEDDING_ENGINE,
+      HasExistingEmbeddings: await this.hasEmbeddings(),
       EmbeddingBasePath: process.env.EMBEDDING_BASE_PATH,
       EmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
       EmbeddingModelMaxChunkLength:
         process.env.EMBEDDING_MODEL_MAX_CHUNK_LENGTH,
-      LocalAiApiKey: !!process.env.LOCAL_AI_API_KEY,
-      DisableTelemetry: process.env.DISABLE_TELEMETRY || "false",
-      ...(vectorDB === "pinecone"
-        ? {
-            PineConeKey: !!process.env.PINECONE_API_KEY,
-            PineConeIndex: process.env.PINECONE_INDEX,
-          }
-        : {}),
-      ...(vectorDB === "chroma"
-        ? {
-            ChromaEndpoint: process.env.CHROMA_ENDPOINT,
-            ChromaApiHeader: process.env.CHROMA_API_HEADER,
-            ChromaApiKey: !!process.env.CHROMA_API_KEY,
-          }
-        : {}),
-      ...(vectorDB === "weaviate"
-        ? {
-            WeaviateEndpoint: process.env.WEAVIATE_ENDPOINT,
-            WeaviateApiKey: process.env.WEAVIATE_API_KEY,
-          }
-        : {}),
-      ...(vectorDB === "qdrant"
-        ? {
-            QdrantEndpoint: process.env.QDRANT_ENDPOINT,
-            QdrantApiKey: process.env.QDRANT_API_KEY,
-          }
-        : {}),
-      ...(vectorDB === "milvus"
-        ? {
-            MilvusAddress: process.env.MILVUS_ADDRESS,
-            MilvusUsername: process.env.MILVUS_USERNAME,
-            MilvusPassword: !!process.env.MILVUS_PASSWORD,
-          }
-        : {}),
-      ...(vectorDB === "zilliz"
-        ? {
-            ZillizEndpoint: process.env.ZILLIZ_ENDPOINT,
-            ZillizApiToken: process.env.ZILLIZ_API_TOKEN,
-          }
-        : {}),
-      ...(vectorDB === "astra"
-        ? {
-            AstraDBApplicationToken: process?.env?.ASTRA_DB_APPLICATION_TOKEN,
-            AstraDBEndpoint: process?.env?.ASTRA_DB_ENDPOINT,
-          }
-        : {}),
+
+      // --------------------------------------------------------
+      // VectorDB Provider Selection Settings & Configs
+      // --------------------------------------------------------
+      VectorDB: vectorDB,
+      // Pinecone DB Keys
+      PineConeKey: !!process.env.PINECONE_API_KEY,
+      PineConeIndex: process.env.PINECONE_INDEX,
+
+      // Chroma DB Keys
+      ChromaEndpoint: process.env.CHROMA_ENDPOINT,
+      ChromaApiHeader: process.env.CHROMA_API_HEADER,
+      ChromaApiKey: !!process.env.CHROMA_API_KEY,
+
+      // Weaviate DB Keys
+      WeaviateEndpoint: process.env.WEAVIATE_ENDPOINT,
+      WeaviateApiKey: process.env.WEAVIATE_API_KEY,
+
+      // QDrant DB Keys
+      QdrantEndpoint: process.env.QDRANT_ENDPOINT,
+      QdrantApiKey: process.env.QDRANT_API_KEY,
+
+      // Milvus DB Keys
+      MilvusAddress: process.env.MILVUS_ADDRESS,
+      MilvusUsername: process.env.MILVUS_USERNAME,
+      MilvusPassword: !!process.env.MILVUS_PASSWORD,
+
+      // Zilliz DB Keys
+      ZillizEndpoint: process.env.ZILLIZ_ENDPOINT,
+      ZillizApiToken: process.env.ZILLIZ_API_TOKEN,
+
+      // AstraDB Keys
+      AstraDBApplicationToken: process?.env?.ASTRA_DB_APPLICATION_TOKEN,
+      AstraDBEndpoint: process?.env?.ASTRA_DB_ENDPOINT,
+
+      // --------------------------------------------------------
+      // LLM Provider Selection Settings & Configs
+      // --------------------------------------------------------
       LLMProvider: llmProvider,
-      ...(llmProvider === "openai"
-        ? {
-            OpenAiKey: !!process.env.OPEN_AI_KEY,
-            OpenAiModelPref: process.env.OPEN_MODEL_PREF || "gpt-3.5-turbo",
-          }
-        : {}),
-      ...(llmProvider === "azure"
-        ? {
-            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
-            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
-            AzureOpenAiModelPref: process.env.OPEN_MODEL_PREF,
-            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
-            AzureOpenAiTokenLimit: process.env.AZURE_OPENAI_TOKEN_LIMIT || 4096,
-          }
-        : {}),
-      ...(llmProvider === "anthropic"
-        ? {
-            AnthropicApiKey: !!process.env.ANTHROPIC_API_KEY,
-            AnthropicModelPref: process.env.ANTHROPIC_MODEL_PREF || "claude-2",
-
-            // For embedding credentials when Anthropic is selected.
-            OpenAiKey: !!process.env.OPEN_AI_KEY,
-            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
-            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
-            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
-          }
-        : {}),
-      ...(llmProvider === "gemini"
-        ? {
-            GeminiLLMApiKey: !!process.env.GEMINI_API_KEY,
-            GeminiLLMModelPref:
-              process.env.GEMINI_LLM_MODEL_PREF || "gemini-pro",
-
-            // For embedding credentials when Gemini is selected.
-            OpenAiKey: !!process.env.OPEN_AI_KEY,
-            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
-            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
-            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
-          }
-        : {}),
-      ...(llmProvider === "lmstudio"
-        ? {
-            LMStudioBasePath: process.env.LMSTUDIO_BASE_PATH,
-            LMStudioTokenLimit: process.env.LMSTUDIO_MODEL_TOKEN_LIMIT,
-            LMStudioModelPref: process.env.LMSTUDIO_MODEL_PREF,
-
-            // For embedding credentials when lmstudio is selected.
-            OpenAiKey: !!process.env.OPEN_AI_KEY,
-            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
-            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
-            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
-          }
-        : {}),
-      ...(llmProvider === "localai"
-        ? {
-            LocalAiBasePath: process.env.LOCAL_AI_BASE_PATH,
-            LocalAiModelPref: process.env.LOCAL_AI_MODEL_PREF,
-            LocalAiTokenLimit: process.env.LOCAL_AI_MODEL_TOKEN_LIMIT,
-
-            // For embedding credentials when localai is selected.
-            OpenAiKey: !!process.env.OPEN_AI_KEY,
-            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
-            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
-            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
-          }
-        : {}),
-      ...(llmProvider === "ollama"
-        ? {
-            OllamaLLMBasePath: process.env.OLLAMA_BASE_PATH,
-            OllamaLLMModelPref: process.env.OLLAMA_MODEL_PREF,
-            OllamaLLMTokenLimit: process.env.OLLAMA_MODEL_TOKEN_LIMIT,
-
-            // For embedding credentials when ollama is selected.
-            OpenAiKey: !!process.env.OPEN_AI_KEY,
-            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
-            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
-            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
-          }
-        : {}),
-      ...(llmProvider === "togetherai"
-        ? {
-            TogetherAiApiKey: !!process.env.TOGETHER_AI_API_KEY,
-            TogetherAiModelPref: process.env.TOGETHER_AI_MODEL_PREF,
-
-            // For embedding credentials when ollama is selected.
-            OpenAiKey: !!process.env.OPEN_AI_KEY,
-            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
-            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
-            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
-          }
-        : {}),
-      ...(llmProvider === "perplexity"
-        ? {
-            PerplexityApiKey: !!process.env.PERPLEXITY_API_KEY,
-            PerplexityModelPref: process.env.PERPLEXITY_MODEL_PREF,
-
-            // For embedding credentials when ollama is selected.
-            OpenAiKey: !!process.env.OPEN_AI_KEY,
-            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
-            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
-            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
-          }
-        : {}),
-      ...(llmProvider === "openrouter"
-        ? {
-            OpenRouterApiKey: !!process.env.OPENROUTER_API_KEY,
-            OpenRouterModelPref: process.env.OPENROUTER_MODEL_PREF,
-
-            // For embedding credentials when ollama is selected.
-            OpenAiKey: !!process.env.OPEN_AI_KEY,
-            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
-            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
-            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
-          }
-        : {}),
-      ...(llmProvider === "mistral"
-        ? {
-            MistralApiKey: !!process.env.MISTRAL_API_KEY,
-            MistralModelPref: process.env.MISTRAL_MODEL_PREF,
-
-            // For embedding credentials when mistral is selected.
-            OpenAiKey: !!process.env.OPEN_AI_KEY,
-            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
-            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
-            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
-          }
-        : {}),
-      ...(llmProvider === "groq"
-        ? {
-            GroqApiKey: !!process.env.GROQ_API_KEY,
-            GroqModelPref: process.env.GROQ_MODEL_PREF,
-
-            // For embedding credentials when groq is selected.
-            OpenAiKey: !!process.env.OPEN_AI_KEY,
-            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
-            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
-            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
-          }
-        : {}),
-      ...(llmProvider === "native"
-        ? {
-            NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF,
-            NativeLLMTokenLimit: process.env.NATIVE_LLM_MODEL_TOKEN_LIMIT,
-
-            // For embedding credentials when native is selected.
-            OpenAiKey: !!process.env.OPEN_AI_KEY,
-            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
-            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
-            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
-          }
-        : {}),
-      ...(llmProvider === "huggingface"
-        ? {
-            HuggingFaceLLMEndpoint: process.env.HUGGING_FACE_LLM_ENDPOINT,
-            HuggingFaceLLMAccessToken: !!process.env.HUGGING_FACE_LLM_API_KEY,
-            HuggingFaceLLMTokenLimit: process.env.HUGGING_FACE_LLM_TOKEN_LIMIT,
-
-            // For embedding credentials when Anthropic is selected.
-            OpenAiKey: !!process.env.OPEN_AI_KEY,
-            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
-            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
-            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
-          }
-        : {}),
+      // OpenAI Keys
+      OpenAiKey: !!process.env.OPEN_AI_KEY,
+      OpenAiModelPref: process.env.OPEN_MODEL_PREF || "gpt-3.5-turbo",
+
+      // Azure + OpenAI Keys
+      AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
+      AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
+      AzureOpenAiModelPref: process.env.OPEN_MODEL_PREF,
+      AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
+      AzureOpenAiTokenLimit: process.env.AZURE_OPENAI_TOKEN_LIMIT || 4096,
+
+      // Anthropic Keys
+      AnthropicApiKey: !!process.env.ANTHROPIC_API_KEY,
+      AnthropicModelPref: process.env.ANTHROPIC_MODEL_PREF || "claude-2",
+
+      // Gemini Keys
+      GeminiLLMApiKey: !!process.env.GEMINI_API_KEY,
+      GeminiLLMModelPref: process.env.GEMINI_LLM_MODEL_PREF || "gemini-pro",
+
+      // LMStudio Keys
+      LMStudioBasePath: process.env.LMSTUDIO_BASE_PATH,
+      LMStudioTokenLimit: process.env.LMSTUDIO_MODEL_TOKEN_LIMIT,
+      LMStudioModelPref: process.env.LMSTUDIO_MODEL_PREF,
+
+      // LocalAI Keys
+      LocalAiApiKey: !!process.env.LOCAL_AI_API_KEY,
+      LocalAiBasePath: process.env.LOCAL_AI_BASE_PATH,
+      LocalAiModelPref: process.env.LOCAL_AI_MODEL_PREF,
+      LocalAiTokenLimit: process.env.LOCAL_AI_MODEL_TOKEN_LIMIT,
+
+      // Ollama LLM Keys
+      OllamaLLMBasePath: process.env.OLLAMA_BASE_PATH,
+      OllamaLLMModelPref: process.env.OLLAMA_MODEL_PREF,
+      OllamaLLMTokenLimit: process.env.OLLAMA_MODEL_TOKEN_LIMIT,
+
+      // TogetherAI Keys
+      TogetherAiApiKey: !!process.env.TOGETHER_AI_API_KEY,
+      TogetherAiModelPref: process.env.TOGETHER_AI_MODEL_PREF,
+
+      // Perplexity AI Keys
+      PerplexityApiKey: !!process.env.PERPLEXITY_API_KEY,
+      PerplexityModelPref: process.env.PERPLEXITY_MODEL_PREF,
+
+      // OpenRouter Keys
+      OpenRouterApiKey: !!process.env.OPENROUTER_API_KEY,
+      OpenRouterModelPref: process.env.OPENROUTER_MODEL_PREF,
+
+      // Mistral AI (API) Keys
+      MistralApiKey: !!process.env.MISTRAL_API_KEY,
+      MistralModelPref: process.env.MISTRAL_MODEL_PREF,
+
+      // Groq AI API Keys
+      GroqApiKey: !!process.env.GROQ_API_KEY,
+      GroqModelPref: process.env.GROQ_MODEL_PREF,
+
+      // Native LLM Keys
+      NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF,
+      NativeLLMTokenLimit: process.env.NATIVE_LLM_MODEL_TOKEN_LIMIT,
+
+      // HuggingFace Dedicated Inference
+      HuggingFaceLLMEndpoint: process.env.HUGGING_FACE_LLM_ENDPOINT,
+      HuggingFaceLLMAccessToken: !!process.env.HUGGING_FACE_LLM_API_KEY,
+      HuggingFaceLLMTokenLimit: process.env.HUGGING_FACE_LLM_TOKEN_LIMIT,
+
+      // --------------------------------------------------------
+      // Whisper (Audio transcription) Selection Settings & Configs
+      // - Currently the only 3rd party is OpenAI, so is OPEN_AI_KEY is set
+      // - then it can be shared.
+      // --------------------------------------------------------
       WhisperProvider: process.env.WHISPER_PROVIDER || "local",
     };
   },
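Why this makes keys persist between provider toggles: the previous currentSettings() only spread the active provider's values into the response, so the settings UI had nothing to repopulate the other providers' fields with after switching. The sketch below (hypothetical settingsBefore/settingsAfter helpers, not part of this repo) contrasts the two patterns; in both, secret values are masked to booleans with `!!` rather than returned verbatim.

// Minimal sketch of the pattern change in this commit (hypothetical names).
// Before: only the selected provider's keys are returned, so toggling the
// provider drops every other provider's saved values from the payload.
function settingsBefore(provider) {
  return {
    LLMProvider: provider,
    ...(provider === "openai"
      ? { OpenAiKey: !!process.env.OPEN_AI_KEY }
      : {}),
    ...(provider === "anthropic"
      ? { AnthropicApiKey: !!process.env.ANTHROPIC_API_KEY }
      : {}),
  };
}

// After: every provider's keys are always returned (secrets still masked),
// so the UI can keep showing saved credentials regardless of the selection.
function settingsAfter(provider) {
  return {
    LLMProvider: provider,
    OpenAiKey: !!process.env.OPEN_AI_KEY,
    AnthropicApiKey: !!process.env.ANTHROPIC_API_KEY,
  };
}

console.log(settingsBefore("openai")); // AnthropicApiKey missing from payload
console.log(settingsAfter("openai")); // both keys present, masked as booleans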