anything-llm/server/utils/helpers/index.js
Francisco Bischoff f499f1ba59
Using OpenAI API locally (#335)
* Using OpenAI API locally

* Infinite prompt input and compression implementation (#332)

* WIP on continuous prompt window summary

* wip

* Move chat out of VDB
simplify chat interface
normalize LLM model interface
have compression abstraction
Cleanup compressor
TODO: Anthropic stuff

* Implement compression for Anthropic
Fix lancedb sources

* cleanup vectorDBs and check that lance, chroma, and pinecone are returning valid metadata sources

* Resolve Weaviate citation sources not working with schema

* comment cleanup

* disable import on hosted instances (#339)

* disable import on hosted instances

* Update UI on disabled import/export

---------

Co-authored-by: timothycarambat <rambat1010@gmail.com>

* Add support for gpt-4-turbo 128K model (#340)

resolves #336
Add support for gpt-4-turbo 128K model

* 315 show citations based on relevancy score (#316)

* settings for similarity score threshold and prisma schema updated

* prisma schema migration for adding similarityScore setting

* WIP

* Min score default change

* added similarityThreshold checking for all vectordb providers

* linting

---------

Co-authored-by: shatfield4 <seanhatfield5@gmail.com>

* rename localai to lmstudio

* forgot files that were renamed

* normalize model interface

* add model and context window limits

* update LMStudio tagline

* Fully working LMStudio integration

---------
Co-authored-by: Francisco Bischoff <984592+franzbischoff@users.noreply.github.com>
Co-authored-by: Timothy Carambat <rambat1010@gmail.com>
Co-authored-by: Sean Hatfield <seanhatfield5@gmail.com>
2023-11-09 12:33:21 -08:00

// Selects the vector database provider based on the VECTOR_DB environment
// variable, defaulting to Pinecone. Providers are required lazily so only
// the selected client's dependencies are loaded.
function getVectorDbClass() {
  const vectorSelection = process.env.VECTOR_DB || "pinecone";
  switch (vectorSelection) {
    case "pinecone":
      const { Pinecone } = require("../vectorDbProviders/pinecone");
      return Pinecone;
    case "chroma":
      const { Chroma } = require("../vectorDbProviders/chroma");
      return Chroma;
    case "lancedb":
      const { LanceDb } = require("../vectorDbProviders/lance");
      return LanceDb;
    case "weaviate":
      const { Weaviate } = require("../vectorDbProviders/weaviate");
      return Weaviate;
    case "qdrant":
      const { QDrant } = require("../vectorDbProviders/qdrant");
      return QDrant;
    default:
      throw new Error(`ENV: Unsupported VECTOR_DB value "${vectorSelection}"!`);
  }
}
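
// Usage sketch (hypothetical configuration values): the caller selects a
// provider once and uses its module-level API, which is defined in
// ../vectorDbProviders/<provider> and not shown in this file.
//
//   process.env.VECTOR_DB = "lancedb";
//   const VectorDb = getVectorDbClass(); // -> LanceDb module export
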
// Selects the LLM provider based on the LLM_PROVIDER environment variable,
// defaulting to OpenAI. Providers without native embedding support
// (Anthropic, LMStudio) are handed the configured embedding engine.
function getLLMProvider() {
  const llmSelection = process.env.LLM_PROVIDER || "openai";
  let embedder = null;
  switch (llmSelection) {
    case "openai":
      const { OpenAiLLM } = require("../AiProviders/openAi");
      return new OpenAiLLM();
    case "azure":
      const { AzureOpenAiLLM } = require("../AiProviders/azureOpenAi");
      return new AzureOpenAiLLM();
    case "anthropic":
      const { AnthropicLLM } = require("../AiProviders/anthropic");
      embedder = getEmbeddingEngineSelection();
      return new AnthropicLLM(embedder);
    case "lmstudio":
      const { LMStudioLLM } = require("../AiProviders/lmStudio");
      embedder = getEmbeddingEngineSelection();
      return new LMStudioLLM(embedder);
    default:
      throw new Error(`ENV: Unsupported LLM_PROVIDER value "${llmSelection}"!`);
  }
}
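
// Usage sketch (hypothetical configuration values): Anthropic and LMStudio
// instances are wired to the separately configured embedding engine, since
// in this file they do not construct an embedder of their own.
//
//   process.env.LLM_PROVIDER = "anthropic";
//   process.env.EMBEDDING_ENGINE = "openai";
//   const llm = getLLMProvider(); // -> AnthropicLLM using OpenAiEmbedder
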
// Selects a dedicated embedding engine based on the EMBEDDING_ENGINE
// environment variable. Returns null when none is configured.
function getEmbeddingEngineSelection() {
  const engineSelection = process.env.EMBEDDING_ENGINE;
  switch (engineSelection) {
    case "openai":
      const { OpenAiEmbedder } = require("../EmbeddingEngines/openAi");
      return new OpenAiEmbedder();
    case "azure":
      const {
        AzureOpenAiEmbedder,
      } = require("../EmbeddingEngines/azureOpenAi");
      return new AzureOpenAiEmbedder();
    default:
      return null;
  }
}
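
// Usage sketch: a null return simply means no dedicated embedding engine is
// configured; callers (e.g. getLLMProvider above) pass the result through
// and must tolerate null.
//
//   delete process.env.EMBEDDING_ENGINE;
//   getEmbeddingEngineSelection(); // -> null
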
// Splits an array into consecutive chunks of at most `size` elements.
function toChunks(arr, size) {
  return Array.from({ length: Math.ceil(arr.length / size) }, (_v, i) =>
    arr.slice(i * size, i * size + size)
  );
}
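
// Example: the final chunk may be shorter than `size`. Handy for batching
// requests against APIs with per-call item limits (an assumed use, not
// shown in this file).
//
//   toChunks([1, 2, 3, 4, 5], 2); // -> [[1, 2], [3, 4], [5]]
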
module.exports = {
  getEmbeddingEngineSelection,
  getVectorDbClass,
  getLLMProvider,
  toChunks,
};
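
// Consumer sketch (the require path is hypothetical and depends on the
// caller's location relative to server/utils/helpers):
//
//   const { getLLMProvider, toChunks } = require("../utils/helpers");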