Add LLM support for Google Gemini-Pro (#492)

resolves #489
Timothy Carambat 2023-12-27 17:08:03 -08:00 committed by GitHub
parent 26549df6a9
commit 24227e48a7
18 changed files with 371 additions and 15 deletions


@@ -58,6 +58,7 @@ Some cool features of AnythingLLM
 - [OpenAI](https://openai.com)
 - [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
 - [Anthropic ClaudeV2](https://www.anthropic.com/)
+- [Google Gemini Pro](https://ai.google.dev/)
 - [LM Studio (all models)](https://lmstudio.ai)
 - [LocalAi (all models)](https://localai.io/)


@@ -11,6 +11,10 @@ GID='1000'
 # OPEN_AI_KEY=
 # OPEN_MODEL_PREF='gpt-3.5-turbo'
 
+# LLM_PROVIDER='gemini'
+# GEMINI_API_KEY=
+# GEMINI_LLM_MODEL_PREF='gemini-pro'
+
 # LLM_PROVIDER='azure'
 # AZURE_OPENAI_ENDPOINT=
 # AZURE_OPENAI_KEY=


@@ -0,0 +1,43 @@
export default function GeminiLLMOptions({ settings }) {
return (
<div className="w-full flex flex-col">
<div className="w-full flex items-center gap-4">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Google AI API Key
</label>
<input
type="password"
name="GeminiLLMApiKey"
className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="Google Gemini API Key"
defaultValue={settings?.GeminiLLMApiKey ? "*".repeat(20) : ""}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="GeminiLLMModelPref"
defaultValue={settings?.GeminiLLMModelPref || "gemini-pro"}
required={true}
className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{["gemini-pro"].map((model) => {
return (
<option key={model} value={model}>
{model}
</option>
);
})}
</select>
</div>
</div>
</div>
);
}

(New binary file — image, 26 KiB — not shown.)


@@ -46,10 +46,10 @@ export default function GeneralEmbeddingPreference() {
     const { error } = await System.updateSystem(settingsData);
     if (error) {
-      showToast(`Failed to save LLM settings: ${error}`, "error");
+      showToast(`Failed to save embedding settings: ${error}`, "error");
       setHasChanges(true);
     } else {
-      showToast("LLM preferences saved successfully.", "success");
+      showToast("Embedding preferences saved successfully.", "success");
       setHasChanges(false);
     }
     setSaving(false);
@@ -132,7 +132,7 @@ export default function GeneralEmbeddingPreference() {
           <div className="text-white text-sm font-medium py-4">
             Embedding Providers
           </div>
-          <div className="w-full flex md:flex-wrap overflow-x-scroll gap-4 max-w-[900px]">
+          <div className="w-full flex md:flex-wrap overflow-x-scroll gap-4">
             <input
               hidden={true}
               name="EmbeddingEngine"
@@ -174,7 +174,7 @@ export default function GeneralEmbeddingPreference() {
              onClick={updateChoice}
            />
          </div>
-         <div className="mt-10 flex flex-wrap gap-4 max-w-[800px]">
+         <div className="mt-10 flex flex-wrap gap-4">
           {embeddingChoice === "native" && <NativeEmbeddingOptions />}
           {embeddingChoice === "openai" && (
             <OpenAiOptions settings={settings} />


@@ -7,6 +7,7 @@ import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
 import OpenAiLogo from "@/media/llmprovider/openai.png";
 import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
 import AnthropicLogo from "@/media/llmprovider/anthropic.png";
+import GeminiLogo from "@/media/llmprovider/gemini.png";
 import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
 import PreLoader from "@/components/Preloader";
@@ -17,6 +18,7 @@ import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
 import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions";
 import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
 import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
+import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
 
 export default function GeneralLLMPreference() {
   const [saving, setSaving] = useState(false);
@@ -105,13 +107,13 @@
           <div className="text-white text-sm font-medium py-4">
             LLM Providers
           </div>
-          <div className="w-full flex md:flex-wrap overflow-x-scroll gap-4 max-w-[900px]">
+          <div className="w-full flex md:flex-wrap overflow-x-scroll gap-4">
            <input hidden={true} name="LLMProvider" value={llmChoice} />
            <LLMProviderOption
              name="OpenAI"
              value="openai"
              link="openai.com"
-             description="The standard option for most non-commercial use. Provides both chat and embedding."
+             description="The standard option for most non-commercial use."
              checked={llmChoice === "openai"}
              image={OpenAiLogo}
              onClick={updateLLMChoice}
@@ -120,7 +122,7 @@
              name="Azure OpenAI"
              value="azure"
              link="azure.microsoft.com"
-             description="The enterprise option of OpenAI hosted on Azure services. Provides both chat and embedding."
+             description="The enterprise option of OpenAI hosted on Azure services."
              checked={llmChoice === "azure"}
              image={AzureOpenAiLogo}
              onClick={updateLLMChoice}
@@ -129,11 +131,20 @@
              name="Anthropic Claude 2"
              value="anthropic"
              link="anthropic.com"
-             description="A friendly AI Assistant hosted by Anthropic. Provides chat services only!"
+             description="A friendly AI Assistant hosted by Anthropic."
              checked={llmChoice === "anthropic"}
              image={AnthropicLogo}
              onClick={updateLLMChoice}
            />
+           <LLMProviderOption
+             name="Google Gemini"
+             value="gemini"
+             link="ai.google.dev"
+             description="Google's largest and most capable AI model"
+             checked={llmChoice === "gemini"}
+             image={GeminiLogo}
+             onClick={updateLLMChoice}
+           />
            <LLMProviderOption
              name="LM Studio"
              value="lmstudio"
@@ -173,6 +184,9 @@
            {llmChoice === "anthropic" && (
              <AnthropicAiOptions settings={settings} showAlert={true} />
            )}
+           {llmChoice === "gemini" && (
+             <GeminiLLMOptions settings={settings} />
+           )}
            {llmChoice === "lmstudio" && (
              <LMStudioOptions settings={settings} showAlert={true} />
            )}


@@ -55,10 +55,10 @@ export default function GeneralVectorDatabase() {
     const { error } = await System.updateSystem(settingsData);
     if (error) {
-      showToast(`Failed to save LLM settings: ${error}`, "error");
+      showToast(`Failed to save vector database settings: ${error}`, "error");
       setHasChanges(true);
     } else {
-      showToast("LLM preferences saved successfully.", "success");
+      showToast("Vector database preferences saved successfully.", "success");
       setHasChanges(false);
     }
     setSaving(false);


@@ -4,6 +4,7 @@ import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
 import OpenAiLogo from "@/media/llmprovider/openai.png";
 import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
 import AnthropicLogo from "@/media/llmprovider/anthropic.png";
+import GeminiLogo from "@/media/llmprovider/gemini.png";
 import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
 import ChromaLogo from "@/media/vectordbs/chroma.png";
@@ -38,6 +39,14 @@ const LLM_SELECTION_PRIVACY = {
     ],
     logo: AnthropicLogo,
   },
+  gemini: {
+    name: "Google Gemini",
+    description: [
+      "Your chats are de-identified and used in training",
+      "Your prompts and document text used in response creation are visible to Google",
+    ],
+    logo: GeminiLogo,
+  },
   lmstudio: {
     name: "LMStudio",
     description: [


@@ -76,7 +76,7 @@ function EmbeddingSelection({ nextStep, prevStep, currentStep }) {
           name="OpenAI"
           value="openai"
           link="openai.com"
-          description="The standard option for most non-commercial use. Provides both chat and embedding."
+          description="The standard option for most non-commercial use."
           checked={embeddingChoice === "openai"}
           image={OpenAiLogo}
           onClick={updateChoice}
@@ -85,7 +85,7 @@ function EmbeddingSelection({ nextStep, prevStep, currentStep }) {
           name="Azure OpenAI"
           value="azure"
           link="azure.microsoft.com"
-          description="The enterprise option of OpenAI hosted on Azure services. Provides both chat and embedding."
+          description="The enterprise option of OpenAI hosted on Azure services."
           checked={embeddingChoice === "azure"}
           image={AzureOpenAiLogo}
           onClick={updateChoice}


@@ -3,6 +3,7 @@ import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
 import OpenAiLogo from "@/media/llmprovider/openai.png";
 import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
 import AnthropicLogo from "@/media/llmprovider/anthropic.png";
+import GeminiLogo from "@/media/llmprovider/gemini.png";
 import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
 import System from "@/models/system";
@@ -14,6 +15,7 @@ import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
 import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions";
 import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
 import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
+import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
 
 function LLMSelection({ nextStep, prevStep, currentStep }) {
   const [llmChoice, setLLMChoice] = useState("openai");
@@ -71,7 +73,7 @@ function LLMSelection({ nextStep, prevStep, currentStep }) {
           name="OpenAI"
           value="openai"
           link="openai.com"
-          description="The standard option for most non-commercial use. Provides both chat and embedding."
+          description="The standard option for most non-commercial use."
           checked={llmChoice === "openai"}
           image={OpenAiLogo}
           onClick={updateLLMChoice}
@@ -80,7 +82,7 @@ function LLMSelection({ nextStep, prevStep, currentStep }) {
           name="Azure OpenAI"
           value="azure"
           link="azure.microsoft.com"
-          description="The enterprise option of OpenAI hosted on Azure services. Provides both chat and embedding."
+          description="The enterprise option of OpenAI hosted on Azure services."
           checked={llmChoice === "azure"}
           image={AzureOpenAiLogo}
           onClick={updateLLMChoice}
@@ -94,6 +96,15 @@ function LLMSelection({ nextStep, prevStep, currentStep }) {
           image={AnthropicLogo}
           onClick={updateLLMChoice}
         />
+        <LLMProviderOption
+          name="Google Gemini"
+          value="gemini"
+          link="ai.google.dev"
+          description="Google's largest and most capable AI model"
+          checked={llmChoice === "gemini"}
+          image={GeminiLogo}
+          onClick={updateLLMChoice}
+        />
         <LLMProviderOption
           name="LM Studio"
           value="lmstudio"
@@ -127,6 +138,7 @@ function LLMSelection({ nextStep, prevStep, currentStep }) {
         {llmChoice === "anthropic" && (
           <AnthropicAiOptions settings={settings} />
         )}
+        {llmChoice === "gemini" && <GeminiLLMOptions settings={settings} />}
         {llmChoice === "lmstudio" && (
           <LMStudioOptions settings={settings} />
         )}


@@ -8,6 +8,10 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at least 12 chars long.
 # OPEN_AI_KEY=
 # OPEN_MODEL_PREF='gpt-3.5-turbo'
 
+# LLM_PROVIDER='gemini'
+# GEMINI_API_KEY=
+# GEMINI_LLM_MODEL_PREF='gemini-pro'
+
 # LLM_PROVIDER='azure'
 # AZURE_OPENAI_ENDPOINT=
 # AZURE_OPENAI_KEY=
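
Taken together, a minimal sketch of the variables needed to run against Gemini (values are placeholders; an embedding credential is shown because Gemini offers chat only, so a separate embedder must be configured — see the GeminiLLM constructor below, which refuses to start without one):

LLM_PROVIDER='gemini'
GEMINI_API_KEY=your-google-ai-key
GEMINI_LLM_MODEL_PREF='gemini-pro'
# Plus credentials for whichever embedding engine is selected, e.g.
# OPEN_AI_KEY=your-openai-key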


@@ -87,6 +87,20 @@ const SystemSettings = {
           }
         : {}),
+      ...(llmProvider === "gemini"
+        ? {
+            GeminiLLMApiKey: !!process.env.GEMINI_API_KEY,
+            GeminiLLMModelPref:
+              process.env.GEMINI_LLM_MODEL_PREF || "gemini-pro",
+
+            // For embedding credentials when Gemini is selected.
+            OpenAiKey: !!process.env.OPEN_AI_KEY,
+            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
+            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
+            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
+          }
+        : {}),
       ...(llmProvider === "lmstudio"
         ? {
             LMStudioBasePath: process.env.LMSTUDIO_BASE_PATH,


@@ -22,6 +22,7 @@
   "dependencies": {
     "@anthropic-ai/sdk": "^0.8.1",
     "@azure/openai": "^1.0.0-beta.3",
+    "@google/generative-ai": "^0.1.3",
     "@googleapis/youtube": "^9.0.0",
     "@pinecone-database/pinecone": "^0.1.6",
     "@prisma/client": "5.3.0",


@@ -0,0 +1,200 @@
const { v4 } = require("uuid");
const { chatPrompt } = require("../../chats");
class GeminiLLM {
constructor(embedder = null) {
if (!process.env.GEMINI_API_KEY)
throw new Error("No Gemini API key was set.");
// Docs: https://ai.google.dev/tutorials/node_quickstart
const { GoogleGenerativeAI } = require("@google/generative-ai");
const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
this.model = process.env.GEMINI_LLM_MODEL_PREF || "gemini-pro";
this.gemini = genAI.getGenerativeModel({ model: this.model });
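    // Budget the 30,720-token context window (see promptWindowLimit below):
    // 15% history + 15% system + 70% user prompt,
    // i.e. 4,608 + 4,608 + 21,504 tokens.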
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
if (!embedder)
throw new Error(
"INVALID GEMINI LLM SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Gemini as your LLM."
);
this.embedder = embedder;
this.answerKey = v4().split("-")[0];
}
streamingEnabled() {
return "streamChat" in this && "streamGetChatCompletion" in this;
}
promptWindowLimit() {
switch (this.model) {
case "gemini-pro":
return 30_720;
default:
return 30_720; // assume a gemini-pro model
}
}
isValidChatCompletionModel(modelName = "") {
const validModels = ["gemini-pro"];
return validModels.includes(modelName);
}
// Moderation cannot be done with Gemini.
// Not implemented so must be stubbed
async isSafe(_input = "") {
return { safe: true, reasons: [] };
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
}) {
const prompt = {
role: "system",
content: `${systemPrompt}
Context:
${contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")}`,
};
return [
prompt,
{ role: "assistant", content: "Okay." },
...chatHistory,
{ role: "USER_PROMPT", content: userPrompt },
];
}
// This will take an OpenAi format message array and only pluck valid roles from it.
formatMessages(messages = []) {
// Gemini roles are either user || model.
// and all "content" is relabeled to "parts"
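    // e.g. { role: "assistant", content: "Okay." } becomes
    //      { role: "model", parts: "Okay." }.
    // "USER_PROMPT" entries fall through to null and are filtered out, since
    // the live prompt is sent separately via chatThread.sendMessage().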
return messages
.map((message) => {
if (message.role === "system")
return { role: "user", parts: message.content };
if (message.role === "user")
return { role: "user", parts: message.content };
if (message.role === "assistant")
return { role: "model", parts: message.content };
return null;
})
.filter((msg) => !!msg);
}
async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
if (!this.isValidChatCompletionModel(this.model))
throw new Error(
`Gemini chat: ${this.model} is not valid for chat completion!`
);
const compressedHistory = await this.compressMessages(
{
systemPrompt: chatPrompt(workspace),
chatHistory,
},
rawHistory
);
const chatThread = this.gemini.startChat({
history: this.formatMessages(compressedHistory),
});
const result = await chatThread.sendMessage(prompt);
const response = result.response;
const responseText = response.text();
if (!responseText) throw new Error("Gemini: No response could be parsed.");
return responseText;
}
async getChatCompletion(messages = [], _opts = {}) {
if (!this.isValidChatCompletionModel(this.model))
throw new Error(
`Gemini chat: ${this.model} is not valid for chat completion!`
);
const prompt = messages.find(
(chat) => chat.role === "USER_PROMPT"
)?.content;
const chatThread = this.gemini.startChat({
history: this.formatMessages(messages),
});
const result = await chatThread.sendMessage(prompt);
const response = result.response;
const responseText = response.text();
if (!responseText) throw new Error("Gemini: No response could be parsed.");
return responseText;
}
async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
if (!this.isValidChatCompletionModel(this.model))
throw new Error(
`Gemini chat: ${this.model} is not valid for chat completion!`
);
const compressedHistory = await this.compressMessages(
{
systemPrompt: chatPrompt(workspace),
chatHistory,
},
rawHistory
);
const chatThread = this.gemini.startChat({
history: this.formatMessages(compressedHistory),
});
const responseStream = await chatThread.sendMessageStream(prompt);
if (!responseStream.stream)
throw new Error("Could not stream response stream from Gemini.");
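    // Tag the stream so handleStreamResponses() can recognize Gemini's
    // asyncIterator-style stream and consume it with for-await.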
return { type: "geminiStream", ...responseStream };
}
async streamGetChatCompletion(messages = [], _opts = {}) {
if (!this.isValidChatCompletionModel(this.model))
throw new Error(
`Gemini chat: ${this.model} is not valid for chat completion!`
);
const prompt = messages.find(
(chat) => chat.role === "USER_PROMPT"
)?.content;
const chatThread = this.gemini.startChat({
history: this.formatMessages(messages),
});
const responseStream = await chatThread.sendMessageStream(prompt);
if (!responseStream.stream)
throw new Error("Could not stream response stream from Gemini.");
return { type: "geminiStream", ...responseStream };
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
}
module.exports = {
GeminiLLM,
};
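
For orientation, a minimal sketch of exercising the provider directly. This is hypothetical wiring, not part of this commit: in the codebase the class is constructed through getLLMProvider() (see below), and the OpenAiEmbedder import path is an assumption.

// Hypothetical standalone usage — paths and embedder are assumptions.
process.env.GEMINI_API_KEY = "your-google-ai-key"; // placeholder
process.env.OPEN_AI_KEY = "your-openai-key"; // placeholder (for the embedder)
const { GeminiLLM } = require("./utils/AiProviders/gemini");
// Any embedder exposing embedTextInput/embedChunks works; path assumed.
const { OpenAiEmbedder } = require("./utils/EmbeddingEngines/openAi");

const llm = new GeminiLLM(new OpenAiEmbedder());
llm
  .getChatCompletion([
    { role: "system", content: "You are a helpful assistant." },
    { role: "assistant", content: "Okay." },
    { role: "USER_PROMPT", content: "Say hello in five words." },
  ])
  .then((text) => console.log(text));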


@@ -202,6 +202,35 @@ async function streamEmptyEmbeddingChat({
 function handleStreamResponses(response, stream, responseProps) {
   const { uuid = uuidv4(), sources = [] } = responseProps;
 
+  // Gemini likes to return a stream asyncIterator which will
+  // be a totally different object than other models.
+  if (stream?.type === "geminiStream") {
+    return new Promise(async (resolve) => {
+      let fullText = "";
+      for await (const chunk of stream.stream) {
+        fullText += chunk.text();
+        writeResponseChunk(response, {
+          uuid,
+          sources: [],
+          type: "textResponseChunk",
+          textResponse: chunk.text(),
+          close: false,
+          error: false,
+        });
+      }
+      writeResponseChunk(response, {
+        uuid,
+        sources,
+        type: "textResponseChunk",
+        textResponse: "",
+        close: true,
+        error: false,
+      });
+      resolve(fullText);
+    });
+  }
+
   // If stream is not a regular OpenAI Stream (like if using native model)
   // we can just iterate the stream content instead.
   if (!stream.hasOwnProperty("data")) {


@@ -34,6 +34,9 @@ function getLLMProvider() {
     case "anthropic":
       const { AnthropicLLM } = require("../AiProviders/anthropic");
       return new AnthropicLLM(embedder);
+    case "gemini":
+      const { GeminiLLM } = require("../AiProviders/gemini");
+      return new GeminiLLM(embedder);
     case "lmstudio":
       const { LMStudioLLM } = require("../AiProviders/lmStudio");
       return new LMStudioLLM(embedder);


@@ -44,6 +44,15 @@ const KEY_MAPPING = {
     checks: [isNotEmpty, validAnthropicModel],
   },
 
+  GeminiLLMApiKey: {
+    envKey: "GEMINI_API_KEY",
+    checks: [isNotEmpty],
+  },
+  GeminiLLMModelPref: {
+    envKey: "GEMINI_LLM_MODEL_PREF",
+    checks: [isNotEmpty, validGeminiModel],
+  },
+
   // LMStudio Settings
   LMStudioBasePath: {
     envKey: "LMSTUDIO_BASE_PATH",
@@ -204,12 +213,20 @@ function supportedLLM(input = "") {
     "openai",
     "azure",
     "anthropic",
+    "gemini",
     "lmstudio",
     "localai",
     "native",
   ].includes(input);
 }
 
+function validGeminiModel(input = "") {
+  const validModels = ["gemini-pro"];
+  return validModels.includes(input)
+    ? null
+    : `Invalid Model type. Must be one of ${validModels.join(", ")}.`;
+}
+
 function validAnthropicModel(input = "") {
   const validModels = ["claude-2", "claude-instant-1"];
   return validModels.includes(input)
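
Note the convention these validators share: a check returns null when the value is acceptable and an error string otherwise. For example, validGeminiModel("gemini-pro") yields null, while validGeminiModel("gpt-4") yields "Invalid Model type. Must be one of gemini-pro.".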


@@ -140,6 +140,11 @@
   resolved "https://registry.yarnpkg.com/@gar/promisify/-/promisify-1.1.3.tgz#555193ab2e3bb3b6adc3d551c9c030d9e860daf6"
   integrity sha512-k2Ty1JcVojjJFwrg/ThKi2ujJ7XNLYaFGNB/bWT9wGR+oSMJHMa5w+CUq6p/pVrKeNNgA7pCqEcjSnHVoqJQFw==
 
+"@google/generative-ai@^0.1.3":
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/@google/generative-ai/-/generative-ai-0.1.3.tgz#8e529d4d86c85b64d297b4abf1a653d613a09a9f"
+  integrity sha512-Cm4uJX1sKarpm1mje/MiOIinM7zdUUrQp/5/qGPAgznbdd/B9zup5ehT6c1qGqycFcSopTA1J1HpqHS5kJR8hQ==
+
 "@googleapis/youtube@^9.0.0":
   version "9.0.0"
   resolved "https://registry.yarnpkg.com/@googleapis/youtube/-/youtube-9.0.0.tgz#e45f6f5f7eac198c6391782b94b3ca54bacf0b63"