788 ollama embedder (#814)

* Add Ollama embedder model support calls

* update docs
Timothy Carambat 2024-02-26 16:12:20 -08:00 committed by GitHub
parent b20e3ce52c
commit b64cb199f9
10 changed files with 251 additions and 2 deletions

README.md

@@ -82,6 +82,7 @@ Some cool features of AnythingLLM
- [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
- [LM Studio (all)](https://lmstudio.ai)
- [LocalAi (all)](https://localai.io/)
+- [Ollama (all)](https://ollama.ai/)

**Supported Vector Databases:**

docker/.env.example

@@ -79,6 +79,11 @@ GID='1000'
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The max chunk size in chars a string to embed can be
+# EMBEDDING_ENGINE='ollama'
+# EMBEDDING_BASE_PATH='http://127.0.0.1:11434'
+# EMBEDDING_MODEL_PREF='nomic-embed-text:latest'
+# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
###########################################
######## Vector Database Selection ########
###########################################
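The four new lines enable Ollama as the embedder: `EMBEDDING_BASE_PATH` points at a running Ollama server and `EMBEDDING_MODEL_PREF` names an embedding model that has already been pulled. A minimal sanity check of that setup (a sketch, assuming a local Ollama instance with `nomic-embed-text` pulled and Node 18+ for global `fetch`; the request shape matches the call the new embedder below makes to `/api/embeddings`):

```js
// Sketch: verify the Ollama embedding endpoint the new embedder will call.
// Assumes Ollama is running at the base path and the model has been pulled
// beforehand with `ollama pull nomic-embed-text`.
const basePath = "http://127.0.0.1:11434";

fetch(`${basePath}/api/embeddings`, {
  method: "POST",
  body: JSON.stringify({
    model: "nomic-embed-text:latest",
    prompt: "Hello, world!",
  }),
})
  .then((res) => res.json())
  .then(({ embedding }) => {
    // nomic-embed-text produces 768-dimension vectors.
    console.log(`Got a ${embedding.length}-dimension embedding.`);
  })
  .catch((err) => console.error("Ollama is not reachable:", err.message));
```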

frontend/src/components/EmbeddingSelection/OllamaOptions/index.jsx

@@ -0,0 +1,120 @@
import React, { useEffect, useState } from "react";
import System from "@/models/system";

export default function OllamaEmbeddingOptions({ settings }) {
const [basePathValue, setBasePathValue] = useState(
settings?.EmbeddingBasePath
);
const [basePath, setBasePath] = useState(settings?.EmbeddingBasePath);
return (
<div className="w-full flex flex-col gap-y-4">
<div className="w-full flex items-center gap-4">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
          Ollama Base URL
</label>
<input
type="url"
name="EmbeddingBasePath"
className="bg-zinc-900 text-white placeholder-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="http://127.0.0.1:11434"
defaultValue={settings?.EmbeddingBasePath}
onChange={(e) => setBasePathValue(e.target.value)}
onBlur={() => setBasePath(basePathValue)}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
<OllamaLLMModelSelection settings={settings} basePath={basePath} />
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Max embedding chunk length
</label>
<input
type="number"
name="EmbeddingModelMaxChunkLength"
className="bg-zinc-900 text-white placeholder-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="8192"
min={1}
onScroll={(e) => e.target.blur()}
defaultValue={settings?.EmbeddingModelMaxChunkLength}
required={false}
autoComplete="off"
/>
</div>
</div>
</div>
);
}

function OllamaLLMModelSelection({ settings, basePath = null }) {
const [customModels, setCustomModels] = useState([]);
const [loading, setLoading] = useState(true);
useEffect(() => {
async function findCustomModels() {
if (!basePath) {
setCustomModels([]);
setLoading(false);
return;
}
setLoading(true);
const { models } = await System.customModels("ollama", null, basePath);
setCustomModels(models || []);
setLoading(false);
}
findCustomModels();
}, [basePath]);
if (loading || customModels.length == 0) {
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Embedding Model Selection
</label>
<select
name="EmbeddingModelPref"
disabled={true}
className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
<option disabled={true} selected={true}>
{!!basePath
? "-- loading available models --"
: "-- waiting for URL --"}
</option>
</select>
</div>
);
}
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Embedding Model Selection
</label>
<select
name="EmbeddingModelPref"
required={true}
className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{customModels.length > 0 && (
<optgroup label="Your loaded models">
{customModels.map((model) => {
return (
<option
key={model.id}
value={model.id}
selected={settings.EmbeddingModelPref === model.id}
>
{model.id}
</option>
);
})}
</optgroup>
)}
</select>
</div>
);
}
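`System.customModels("ollama", null, basePath)` is AnythingLLM's existing frontend helper; this diff doesn't show how it enumerates models, but Ollama itself lists every locally pulled model at `GET /api/tags`. A hedged standalone sketch of the lookup that backs the dropdown:

```js
// Sketch of the model discovery behind the dropdown. Assumption: the backend
// resolves "ollama" custom models via Ollama's GET /api/tags endpoint, which
// returns { models: [{ name, size, ... }] } for each locally pulled model.
async function listOllamaModels(basePath) {
  const res = await fetch(`${basePath}/api/tags`);
  const { models } = await res.json();
  // Map Ollama's `name` field onto the `{ id }` shape the <select> expects.
  return models.map((model) => ({ id: model.name }));
}

listOllamaModels("http://127.0.0.1:11434").then(console.log);
```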

frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx

@@ -7,12 +7,14 @@ import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
import OpenAiLogo from "@/media/llmprovider/openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
+import OllamaLogo from "@/media/llmprovider/ollama.png";
import PreLoader from "@/components/Preloader";
import ChangeWarningModal from "@/components/ChangeWarning";
import OpenAiOptions from "@/components/EmbeddingSelection/OpenAiOptions";
import AzureAiOptions from "@/components/EmbeddingSelection/AzureAiOptions";
import LocalAiOptions from "@/components/EmbeddingSelection/LocalAiOptions";
import NativeEmbeddingOptions from "@/components/EmbeddingSelection/NativeEmbeddingOptions";
+import OllamaEmbeddingOptions from "@/components/EmbeddingSelection/OllamaOptions";
import EmbedderItem from "@/components/EmbeddingSelection/EmbedderItem";
import { MagnifyingGlass } from "@phosphor-icons/react";
import { useModal } from "@/hooks/useModal";
@@ -108,6 +110,13 @@ export default function GeneralEmbeddingPreference() {
      options: <LocalAiOptions settings={settings} />,
      description: "Run embedding models locally on your own machine.",
    },
+    {
+      name: "Ollama",
+      value: "ollama",
+      logo: OllamaLogo,
+      options: <OllamaEmbeddingOptions settings={settings} />,
+      description: "Run embedding models locally on your own machine.",
+    },
  ];
  useEffect(() => {

frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx

@@ -221,6 +221,13 @@ const EMBEDDING_ENGINE_PRIVACY = {
    ],
    logo: LocalAiLogo,
  },
+  ollama: {
+    name: "Ollama",
+    description: [
+      "Your document text is embedded privately on the server running Ollama",
+    ],
+    logo: OllamaLogo,
+  },
};
export default function DataHandling({ setHeader, setForwardBtn, setBackBtn }) {

frontend/src/pages/OnboardingFlow/Steps/EmbeddingPreference/index.jsx

@@ -4,10 +4,12 @@ import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
import OpenAiLogo from "@/media/llmprovider/openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
+import OllamaLogo from "@/media/llmprovider/ollama.png";
import NativeEmbeddingOptions from "@/components/EmbeddingSelection/NativeEmbeddingOptions";
import OpenAiOptions from "@/components/EmbeddingSelection/OpenAiOptions";
import AzureAiOptions from "@/components/EmbeddingSelection/AzureAiOptions";
import LocalAiOptions from "@/components/EmbeddingSelection/LocalAiOptions";
+import OllamaEmbeddingOptions from "@/components/EmbeddingSelection/OllamaOptions";
import EmbedderItem from "@/components/EmbeddingSelection/EmbedderItem";
import System from "@/models/system";
import paths from "@/utils/paths";
@@ -70,6 +72,13 @@ export default function EmbeddingPreference({
      options: <LocalAiOptions settings={settings} />,
      description: "Run embedding models locally on your own machine.",
    },
+    {
+      name: "Ollama",
+      value: "ollama",
+      logo: OllamaLogo,
+      options: <OllamaEmbeddingOptions settings={settings} />,
+      description: "Run embedding models locally on your own machine.",
+    },
  ];
  function handleForward() {

server/.env.example

@@ -76,6 +76,11 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The max chunk size in chars a string to embed can be
+# EMBEDDING_ENGINE='ollama'
+# EMBEDDING_BASE_PATH='http://127.0.0.1:11434'
+# EMBEDDING_MODEL_PREF='nomic-embed-text:latest'
+# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
###########################################
######## Vector Database Selection ########
###########################################

server/utils/EmbeddingEngines/ollama/index.js

@@ -0,0 +1,90 @@
const { maximumChunkLength } = require("../../helpers");
class OllamaEmbedder {
constructor() {
if (!process.env.EMBEDDING_BASE_PATH)
throw new Error("No embedding base path was set.");
if (!process.env.EMBEDDING_MODEL_PREF)
throw new Error("No embedding model was set.");
this.basePath = `${process.env.EMBEDDING_BASE_PATH}/api/embeddings`;
this.model = process.env.EMBEDDING_MODEL_PREF;
    // Limit of how many strings we can process in a single pass to stay within resource or network limits
this.maxConcurrentChunks = 1;
this.embeddingMaxChunkLength = maximumChunkLength();
}
log(text, ...args) {
console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
}
async embedTextInput(textInput) {
const result = await this.embedChunks([textInput]);
return result?.[0] || [];
}
async embedChunks(textChunks = []) {
const embeddingRequests = [];
this.log(
`Embedding ${textChunks.length} chunks of text with ${this.model}.`
);
for (const chunk of textChunks) {
embeddingRequests.push(
new Promise((resolve) => {
fetch(this.basePath, {
method: "POST",
body: JSON.stringify({
model: this.model,
prompt: chunk,
}),
})
.then((res) => res.json())
.then(({ embedding }) => {
resolve({ data: embedding, error: null });
return;
})
.catch((error) => {
resolve({ data: [], error: error.message });
return;
});
})
);
}
const { data = [], error = null } = await Promise.all(
embeddingRequests
).then((results) => {
// If any errors were returned from Ollama abort the entire sequence because the embeddings
// will be incomplete.
const errors = results
.filter((res) => !!res.error)
.map((res) => res.error)
.flat();
if (errors.length > 0) {
let uniqueErrors = new Set();
          // Each entry is already an error-message string from the catch handler above.
          errors.map((error) => uniqueErrors.add(error));
return {
data: [],
error: Array.from(uniqueErrors).join(", "),
};
}
return {
data: results.map((res) => res?.data || []),
error: null,
};
});
    if (!!error) throw new Error(`Ollama failed to embed: ${error}`);
return data.length > 0 ? data : null;
}
}
module.exports = {
OllamaEmbedder,
};
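A hypothetical direct usage of the new class (in the app it is only ever constructed through `getEmbeddingEngineSelection()`, shown in the next file; the require path here is assumed):

```js
// Sketch: exercising OllamaEmbedder directly. The env vars mirror the
// .env.example entries above and must be set before construction, or the
// constructor throws.
process.env.EMBEDDING_BASE_PATH = "http://127.0.0.1:11434";
process.env.EMBEDDING_MODEL_PREF = "nomic-embed-text:latest";

const { OllamaEmbedder } = require("./utils/EmbeddingEngines/ollama"); // path assumed

const embedder = new OllamaEmbedder();
embedder.embedChunks(["first chunk", "second chunk"]).then((vectors) => {
  // One embedding per input chunk, in order: e.g. 2 x 768 numbers.
  console.log(vectors.length, vectors[0].length);
});
```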

server/utils/helpers/index.js

@@ -92,6 +92,9 @@ function getEmbeddingEngineSelection() {
    case "localai":
      const { LocalAiEmbedder } = require("../EmbeddingEngines/localAi");
      return new LocalAiEmbedder();
+    case "ollama":
+      const { OllamaEmbedder } = require("../EmbeddingEngines/ollama");
+      return new OllamaEmbedder();
    case "native":
      const { NativeEmbedder } = require("../EmbeddingEngines/native");
      console.log("\x1b[34m[INFO]\x1b[0m Using Native Embedder");

server/utils/helpers/updateENV.js

@@ -135,7 +135,7 @@ const KEY_MAPPING = {
  },
  EmbeddingBasePath: {
    envKey: "EMBEDDING_BASE_PATH",
-    checks: [isNotEmpty, validLLMExternalBasePath, validDockerizedUrl],
+    checks: [isNotEmpty, validDockerizedUrl],
  },
  EmbeddingModelPref: {
    envKey: "EMBEDDING_MODEL_PREF",
@@ -355,7 +355,7 @@ function validAnthropicModel(input = "") {
}
function supportedEmbeddingModel(input = "") {
-  const supported = ["openai", "azure", "localai", "native"];
+  const supported = ["openai", "azure", "localai", "native", "ollama"];
  return supported.includes(input)
    ? null
    : `Invalid Embedding model type. Must be one of ${supported.join(", ")}.`;
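Two validation changes close the loop: `EmbeddingBasePath` drops the `validLLMExternalBasePath` check (which, judging by its name, expects an OpenAI-style base path and would reject Ollama's bare `host:port` URL — an assumption from the check's name, not shown in this diff), and `supportedEmbeddingModel` now whitelists the new engine. For illustration:

```js
// Illustrative calls against the updated validator above.
supportedEmbeddingModel("ollama"); // -> null (valid)
supportedEmbeddingModel("mistral");
// -> "Invalid Embedding model type. Must be one of openai, azure, localai, native, ollama."
```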