Make native embedder the fallback for all LLMs (#1427)

Timothy Carambat 2024-05-16 17:25:05 -07:00 committed by GitHub
parent 7e0b638a2c
commit 01cf2fed17
19 changed files with 29 additions and 74 deletions
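
The same change repeats across all 19 provider files: each constructor now falls back to the bundled NativeEmbedder when no embedder is passed in, instead of throwing or only warning. A minimal sketch of the resulting constructor shape; the NativeEmbedder require path is taken from the diff, while ExampleLLM and its fields are illustrative only:

    // Sketch of the fallback pattern this commit applies in each provider constructor.
    // The require path matches the diff; the ExampleLLM class itself is illustrative.
    const { NativeEmbedder } = require("../../EmbeddingEngines/native");

    class ExampleLLM {
      constructor(embedder = null, modelPreference = null) {
        // Nullish coalescing: fall back only when no embedder was supplied
        // (null or undefined); a caller-provided embedder is always kept.
        this.embedder = embedder ?? new NativeEmbedder();
        this.defaultTemp = 0.7;
      }
    }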

View File

@@ -3,6 +3,7 @@ const {
   writeResponseChunk,
   clientAbortedHandler,
 } = require("../../helpers/chat/responses");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 class AnthropicLLM {
   constructor(embedder = null, modelPreference = null) {
@@ -23,11 +24,7 @@ class AnthropicLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    if (!embedder)
-      throw new Error(
-        "INVALID ANTHROPIC SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Anthropic as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

View File

@@ -1,4 +1,4 @@
-const { AzureOpenAiEmbedder } = require("../../EmbeddingEngines/azureOpenAi");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   writeResponseChunk,
   clientAbortedHandler,
@@ -23,11 +23,7 @@ class AzureOpenAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for AzureOpenAiLLM - falling back to AzureOpenAiEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new AzureOpenAiEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

View File

@@ -19,7 +19,8 @@ class CohereLLM {
       system: this.promptWindowLimit() * 0.15,
       user: this.promptWindowLimit() * 0.7,
     };
-    this.embedder = !!embedder ? embedder : new NativeEmbedder();
+    this.embedder = embedder ?? new NativeEmbedder();
   }
   #appendContext(contextTexts = []) {
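
Several of the one-line hunks in this commit (this file, GroqLLM, NativeLLM, OpenRouterLLM, PerplexityLLM, TextGenWebUILLM) replace truthiness-based fallbacks (`!!embedder ? ... : ...`, `!embedder ? ... : ...`, `embedder || ...`) with nullish coalescing. A standalone sketch of the semantic difference; the stub object is purely illustrative and not the project's embedder API:

    // node sketch: ?? falls back only when the left side is null or undefined,
    // while || (and !embedder checks) also fall back on any other falsy value.
    const stubEmbedder = { name: "stub" }; // illustrative placeholder object

    console.log(null ?? "fallback");         // -> "fallback"
    console.log(undefined ?? "fallback");    // -> "fallback"
    console.log(stubEmbedder ?? "fallback"); // -> { name: "stub" }  (kept)
    console.log(stubEmbedder || "fallback"); // -> { name: "stub" }  (also kept)
    console.log(0 ?? "fallback");            // -> 0, ?? keeps falsy non-null values
    console.log(0 || "fallback");            // -> "fallback"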

View File

@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   writeResponseChunk,
   clientAbortedHandler,
@@ -26,11 +27,7 @@ class GeminiLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    if (!embedder)
-      throw new Error(
-        "INVALID GEMINI LLM SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Gemini as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7; // not used for Gemini
   }

View File

@@ -27,11 +27,7 @@ class GenericOpenAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for GenericOpenAiLLM - falling back to NativeEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
     this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
   }

View File

@@ -20,7 +20,7 @@ class GroqLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

View File

@@ -1,5 +1,4 @@
 const { NativeEmbedder } = require("../../EmbeddingEngines/native");
-const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -26,11 +25,7 @@ class HuggingFaceLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for HuggingFaceLLM - falling back to Native for embedding!"
-      );
-    this.embedder = !embedder ? new OpenAiEmbedder() : new NativeEmbedder();
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.2;
   }

View File

@@ -26,11 +26,7 @@ class KoboldCPPLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for KoboldCPPLLM - falling back to NativeEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
     this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
   }

View File

@@ -26,11 +26,7 @@ class LiteLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for LiteLLM - falling back to NativeEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
     this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
   }

View File

@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -27,11 +28,7 @@ class LMStudioLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    if (!embedder)
-      throw new Error(
-        "INVALID LM STUDIO SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LMStudio as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

View File

@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -19,11 +20,7 @@ class LocalAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    if (!embedder)
-      throw new Error(
-        "INVALID LOCAL AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LocalAI as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

View File

@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -20,11 +21,7 @@ class MistralLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for MistralLLM - falling back to OpenAiEmbedder for embedding!"
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.0;
   }

View File

@@ -23,7 +23,7 @@ class NativeLLM {
       system: this.promptWindowLimit() * 0.15,
       user: this.promptWindowLimit() * 0.7,
     };
-    this.embedder = embedder || new NativeEmbedder();
+    this.embedder = embedder ?? new NativeEmbedder();
     this.cacheDir = path.resolve(
       process.env.STORAGE_DIR
         ? path.resolve(process.env.STORAGE_DIR, "models", "downloaded")

View File

@@ -3,6 +3,7 @@ const {
   writeResponseChunk,
   clientAbortedHandler,
 } = require("../../helpers/chat/responses");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 // Docs: https://github.com/jmorganca/ollama/blob/main/docs/api.md
 class OllamaAILLM {
@@ -18,11 +19,7 @@ class OllamaAILLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    if (!embedder)
-      throw new Error(
-        "INVALID OLLAMA SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Ollama as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

View File

@@ -1,4 +1,4 @@
-const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -18,11 +18,7 @@ class OpenAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for OpenAiLLM - falling back to OpenAiEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new OpenAiEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

View File

@@ -36,7 +36,7 @@ class OpenRouterLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
     if (!fs.existsSync(cacheFolder))

View File

@@ -28,7 +28,7 @@ class PerplexityLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

View File

@@ -23,7 +23,7 @@ class TextGenWebUILLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
     this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
   }

View File

@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -23,11 +24,7 @@ class TogetherAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };
-    if (!embedder)
-      throw new Error(
-        "INVALID TOGETHER AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Together AI as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = !embedder ? new NativeEmbedder() : embedder;
     this.defaultTemp = 0.7;
   }