anything-llm/server/utils/helpers/index.js

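/**
 * Resolves the vector database provider class selected by the VECTOR_DB
 * environment variable (defaults to "pinecone"). Returns the provider
 * module's class/namespace rather than an instance.
 */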
function getVectorDbClass() {
  const vectorSelection = process.env.VECTOR_DB || "pinecone";
  switch (vectorSelection) {
    case "pinecone":
      const { Pinecone } = require("../vectorDbProviders/pinecone");
      return Pinecone;
    case "chroma":
      const { Chroma } = require("../vectorDbProviders/chroma");
      return Chroma;
    case "lancedb":
      const { LanceDb } = require("../vectorDbProviders/lance");
      return LanceDb;
    case "weaviate":
      const { Weaviate } = require("../vectorDbProviders/weaviate");
      return Weaviate;
    case "qdrant":
      const { QDrant } = require("../vectorDbProviders/qdrant");
      return QDrant;
    case "milvus":
      const { Milvus } = require("../vectorDbProviders/milvus");
      return Milvus;
    case "zilliz":
      const { Zilliz } = require("../vectorDbProviders/zilliz");
      return Zilliz;
    case "astra":
      const { AstraDB } = require("../vectorDbProviders/astra");
      return AstraDB;
    default:
      throw new Error("ENV: No VECTOR_DB value found in environment!");
  }
}
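/**
 * Instantiates the chat LLM provider selected by the LLM_PROVIDER
 * environment variable (defaults to "openai"), passing along the configured
 * embedding engine and an optional model preference.
 * @param {string|null} modelPreference - model identifier to prefer over the
 * provider's default (exact semantics are defined by each provider class).
 */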
function getLLMProvider(modelPreference = null) {
  const providerSelection = process.env.LLM_PROVIDER || "openai";
  const embedder = getEmbeddingEngineSelection();
  switch (providerSelection) {
case "openai":
const { OpenAiLLM } = require("../AiProviders/openAi");
return new OpenAiLLM(embedder, modelPreference);
case "azure":
const { AzureOpenAiLLM } = require("../AiProviders/azureOpenAi");
return new AzureOpenAiLLM(embedder, modelPreference);
case "anthropic":
const { AnthropicLLM } = require("../AiProviders/anthropic");
return new AnthropicLLM(embedder, modelPreference);
case "gemini":
const { GeminiLLM } = require("../AiProviders/gemini");
return new GeminiLLM(embedder, modelPreference);
case "lmstudio":
const { LMStudioLLM } = require("../AiProviders/lmStudio");
return new LMStudioLLM(embedder, modelPreference);
case "localai":
const { LocalAiLLM } = require("../AiProviders/localAi");
return new LocalAiLLM(embedder, modelPreference);
case "ollama":
const { OllamaAILLM } = require("../AiProviders/ollama");
return new OllamaAILLM(embedder, modelPreference);
case "togetherai":
const { TogetherAiLLM } = require("../AiProviders/togetherAi");
return new TogetherAiLLM(embedder, modelPreference);
case "perplexity":
const { PerplexityLLM } = require("../AiProviders/perplexity");
return new PerplexityLLM(embedder, modelPreference);
case "mistral":
const { MistralLLM } = require("../AiProviders/mistral");
return new MistralLLM(embedder, modelPreference);
case "native":
const { NativeLLM } = require("../AiProviders/native");
return new NativeLLM(embedder, modelPreference);
case "huggingface":
const { HuggingFaceLLM } = require("../AiProviders/huggingface");
return new HuggingFaceLLM(embedder, modelPreference);
default:
throw new Error("ENV: No LLM_PROVIDER value found in environment!");
}
}
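/**
 * Instantiates the embedding engine selected by the EMBEDDING_ENGINE
 * environment variable. Returns null when no engine is configured, leaving
 * any fallback behavior to the caller.
 */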
function getEmbeddingEngineSelection() {
  const engineSelection = process.env.EMBEDDING_ENGINE;
  switch (engineSelection) {
    case "openai":
      const { OpenAiEmbedder } = require("../EmbeddingEngines/openAi");
      return new OpenAiEmbedder();
    case "azure":
      const {
        AzureOpenAiEmbedder,
      } = require("../EmbeddingEngines/azureOpenAi");
      return new AzureOpenAiEmbedder();
    case "localai":
      const { LocalAiEmbedder } = require("../EmbeddingEngines/localAi");
      return new LocalAiEmbedder();
    case "native":
      const { NativeEmbedder } = require("../EmbeddingEngines/native");
      console.log("\x1b[34m[INFO]\x1b[0m Using Native Embedder");
      return new NativeEmbedder();
    default:
      return null;
  }
}
// Some embedding models have tighter limits on how many characters can be
// encoded in a single pass. By default we assume a model can handle 1,000
// characters per chunk, but this can be overridden via
// EMBEDDING_MODEL_MAX_CHUNK_LENGTH when a model requires smaller chunks.
function maximumChunkLength() {
  if (
    !!process.env.EMBEDDING_MODEL_MAX_CHUNK_LENGTH &&
    !isNaN(process.env.EMBEDDING_MODEL_MAX_CHUNK_LENGTH) &&
    Number(process.env.EMBEDDING_MODEL_MAX_CHUNK_LENGTH) > 1
  )
    return Number(process.env.EMBEDDING_MODEL_MAX_CHUNK_LENGTH);
  return 1_000;
}
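/**
 * Splits an array into consecutive chunks of at most `size` elements.
 * @example
 * toChunks([1, 2, 3, 4, 5], 2); // => [[1, 2], [3, 4], [5]]
 */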
function toChunks(arr, size) {
  return Array.from({ length: Math.ceil(arr.length / size) }, (_v, i) =>
    arr.slice(i * size, i * size + size)
  );
}
module.exports = {
  getEmbeddingEngineSelection,
  maximumChunkLength,
  getVectorDbClass,
  getLLMProvider,
  toChunks,
};
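
// Minimal usage sketch (hypothetical caller code, not part of this module).
// All of these helpers are driven purely by environment variables:
//   process.env.VECTOR_DB = "lancedb";
//   process.env.LLM_PROVIDER = "openai";
//   const VectorDb = getVectorDbClass();      // provider class, e.g. LanceDb
//   const llm = getLLMProvider();             // provider instance wired to the configured embedder
//   const batches = toChunks(someArray, 100); // someArray is a placeholder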