anything-llm/server/utils/helpers/customModels.js
Timothy Carambat 655ebd9479
[Feature] AnythingLLM use locally hosted Llama.cpp and GGUF files for inferencing (#413)
* Implement use of native embedder (all-MiniLM-L6-v2)
stop showing Prisma queries during dev

* Add native embedder as an available embedder selection

* wrap model loader in try/catch

* print progress on download

* add built-in LLM support (experimental)

* Update to progress output for embedder

* move embedder selection options to component

* safety checks for modelfile

* update ref

* Hide selection when on hosted subdomain

* update documentation
hide localLlama when on hosted

* safety checks for storage of models

* update dockerfile to pre-build Llama.cpp bindings

* update lockfile

* add langchain doc comment

* remove extraneous --no-metal option

* Show data handling for private LLM

* persist model in memory for N+1 chats

* update import
update dev comment on token model size

* update primary README

* chore: more readme updates and remove screenshots - too much to maintain, just use the app!

* remove screenshot link
2023-12-07 14:48:27 -08:00


const SUPPORT_CUSTOM_MODELS = ["openai", "localai", "native-llm"];
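
/**
 * Returns the list of models a user can select for a given provider.
 * @param {string} provider - one of SUPPORT_CUSTOM_MODELS
 * @param {string|null} apiKey - provider API key, if the provider requires one
 * @param {string|null} basePath - base URL for self-hosted providers (LocalAI)
 * @returns {Promise<{models: object[], error: string|null}>}
 */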
async function getCustomModels(provider = "", apiKey = null, basePath = null) {
  if (!SUPPORT_CUSTOM_MODELS.includes(provider))
    return { models: [], error: "Invalid provider for custom models" };

  switch (provider) {
    case "openai":
      return await openAiModels(apiKey);
    case "localai":
      // Pass the apiKey through so authenticated LocalAI instances work;
      // previously it was accepted by localAIModels but never forwarded.
      return await localAIModels(basePath, apiKey);
    case "native-llm":
      return nativeLLMModels();
    default:
      return { models: [], error: "Invalid provider for custom models" };
  }
}
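
// Lists the models available on the OpenAI account and keeps only
// user-owned (fine-tuned) models, dropping OpenAI's own base models.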
async function openAiModels(apiKey = null) {
  const { Configuration, OpenAIApi } = require("openai");
  const config = new Configuration({
    apiKey: apiKey || process.env.OPEN_AI_KEY,
  });
  const openai = new OpenAIApi(config);
  const models = (
    await openai
      .listModels()
      .then((res) => res.data.data)
      .catch((e) => {
        console.error(`OpenAI:listModels`, e.message);
        return [];
      })
  ).filter(
    (model) => !model.owned_by.includes("openai") && model.owned_by !== "system"
  );

  return { models, error: null };
}
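
// Lists models from a LocalAI instance via its OpenAI-compatible
// /models endpoint; the apiKey is only attached when one is provided.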
async function localAIModels(basePath = null, apiKey = null) {
  const { Configuration, OpenAIApi } = require("openai");
  const config = new Configuration({
    basePath,
    ...(!!apiKey ? { apiKey } : {}),
  });
  const openai = new OpenAIApi(config);
  const models = await openai
    .listModels()
    .then((res) => res.data.data)
    .catch((e) => {
      console.error(`LocalAI:listModels`, e.message);
      return [];
    });

  return { models, error: null };
}
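
// Lists GGUF model files that have been downloaded into local storage
// for the built-in Llama.cpp LLM. Synchronous since it only reads disk.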
function nativeLLMModels() {
  const fs = require("fs");
  const path = require("path");
  const storageDir = path.resolve(
    process.env.STORAGE_DIR
      ? path.resolve(process.env.STORAGE_DIR, "models", "downloaded")
      : path.resolve(__dirname, `../../storage/models/downloaded`)
  );
  if (!fs.existsSync(storageDir))
    return { models: [], error: "No models/downloaded storage folder found." };

  const files = fs
    .readdirSync(storageDir)
    .filter((file) => file.toLowerCase().includes(".gguf"))
    .map((file) => {
      return { id: file, name: file };
    });
  return { models: files, error: null };
}
module.exports = {
  getCustomModels,
};
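
// Example usage (a minimal sketch; the basePath below is a hypothetical
// LocalAI endpoint, and the call assumes it runs inside an async context):
//
//   const { getCustomModels } = require("./customModels");
//   const { models, error } = await getCustomModels(
//     "localai",
//     null,
//     "http://localhost:8080/v1"
//   );
//   if (!error) console.log(models.map((m) => m.id));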