diff --git a/README.md b/README.md
index ff50a858..20035570 100644
--- a/README.md
+++ b/README.md
@@ -71,6 +71,7 @@ Some cool features of AnythingLLM
- [LM Studio (all models)](https://lmstudio.ai)
- [LocalAi (all models)](https://localai.io/)
- [Together AI (chat models)](https://www.together.ai/)
+- [Perplexity (chat models)](https://www.perplexity.ai/)
- [Mistral](https://mistral.ai/)
**Supported Embedding models:**
@@ -108,8 +109,8 @@ Mintplex Labs & the community maintain a number of deployment methods, scripts,
|----------------------------------------|----:|-----|---------------|------------|
| [![Deploy on Docker][docker-btn]][docker-deploy] | [![Deploy on AWS][aws-btn]][aws-deploy] | [![Deploy on GCP][gcp-btn]][gcp-deploy] | [![Deploy on DigitalOcean][do-btn]][aws-deploy] | [![Deploy on Render.com][render-btn]][render-deploy] |
-| Railway |
-|----------------------------------------|
+| Railway |
+| --------------------------------------------------- |
| [![Deploy on Railway][railway-btn]][railway-deploy] |
[or set up a production AnythingLLM instance without Docker →](./BARE_METAL.md)
diff --git a/docker/.env.example b/docker/.env.example
index b14d3c6e..eed50578 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -48,6 +48,10 @@ GID='1000'
# MISTRAL_API_KEY='example-mistral-ai-api-key'
# MISTRAL_MODEL_PREF='mistral-tiny'
+# LLM_PROVIDER='perplexity'
+# PERPLEXITY_API_KEY='my-perplexity-key'
+# PERPLEXITY_MODEL_PREF='codellama-34b-instruct'
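+# Valid model names are the keys of server/utils/AiProviders/perplexity/models.js (e.g. 'pplx-7b-online', 'mixtral-8x7b-instruct').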
+
# LLM_PROVIDER='huggingface'
# HUGGING_FACE_LLM_ENDPOINT=https://uuid-here.us-east-1.aws.endpoints.huggingface.cloud
# HUGGING_FACE_LLM_API_KEY=hf_xxxxxx
diff --git a/frontend/src/components/LLMSelection/PerplexityOptions/index.jsx b/frontend/src/components/LLMSelection/PerplexityOptions/index.jsx
new file mode 100644
index 00000000..0b392cf4
--- /dev/null
+++ b/frontend/src/components/LLMSelection/PerplexityOptions/index.jsx
@@ -0,0 +1,88 @@
+import System from "@/models/system";
+import { useState, useEffect } from "react";
+
+export default function PerplexityOptions({ settings }) {
+  return (
+    <div className="flex gap-4 flex-wrap">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Perplexity API Key
+        </label>
+        <input
+          type="password"
+          name="PerplexityApiKey"
+          className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg block w-full p-2.5"
+          placeholder="Perplexity API Key"
+          defaultValue={settings?.PerplexityApiKey ? "*".repeat(20) : ""}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+        />
+      </div>
+      <PerplexityModelSelection settings={settings} />
+    </div>
+  );
+}
+
+function PerplexityModelSelection({ settings }) {
+ const [customModels, setCustomModels] = useState([]);
+ const [loading, setLoading] = useState(true);
+
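+  // Fetch the selectable Perplexity models from the backend, which serves the
+  // static list defined in server/utils/AiProviders/perplexity/models.js.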
+ useEffect(() => {
+ async function findCustomModels() {
+ setLoading(true);
+ const { models } = await System.customModels("perplexity");
+ setCustomModels(models || []);
+ setLoading(false);
+ }
+ findCustomModels();
+ }, []);
+
+  if (loading || customModels.length === 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Chat Model Selection
+        </label>
+        <select name="PerplexityModelPref" disabled={true} className="bg-zinc-900 text-white text-sm rounded-lg block w-full p-2.5">
+          <option disabled={true}>-- loading available models --</option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-4">
+        Chat Model Selection
+      </label>
+      <select name="PerplexityModelPref" required={true} defaultValue={settings?.PerplexityModelPref} className="bg-zinc-900 text-white text-sm rounded-lg block w-full p-2.5">
+        {customModels.map((model) => (
+          <option key={model.id} value={model.id}>
+            {model.id}
+          </option>
+        ))}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/media/llmprovider/perplexity.png b/frontend/src/media/llmprovider/perplexity.png
new file mode 100644
index 00000000..f4767169
Binary files /dev/null and b/frontend/src/media/llmprovider/perplexity.png differ
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index 45ad5fd7..8c51e559 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -14,6 +14,7 @@ import LocalAiLogo from "@/media/llmprovider/localai.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
import MistralLogo from "@/media/llmprovider/mistral.jpeg";
import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
+import PerplexityLogo from "@/media/llmprovider/perplexity.png";
import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
@@ -26,8 +27,10 @@ import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
import MistralOptions from "@/components/LLMSelection/MistralOptions";
import HuggingFaceOptions from "@/components/LLMSelection/HuggingFaceOptions";
+
import LLMItem from "@/components/LLMSelection/LLMItem";
import { MagnifyingGlass } from "@phosphor-icons/react";
+import PerplexityOptions from "@/components/LLMSelection/PerplexityOptions";
export default function GeneralLLMPreference() {
const [saving, setSaving] = useState(false);
@@ -153,6 +156,14 @@ export default function GeneralLLMPreference() {
options: <MistralOptions settings={settings} />,
description: "Run open source models from Mistral AI.",
},
+ {
+ name: "Perplexity AI",
+ value: "perplexity",
+ logo: PerplexityLogo,
+ options: <PerplexityOptions settings={settings} />,
+ description:
+ "Run powerful and internet-connected models hosted by Perplexity AI.",
+ },
{
name: "Native",
value: "native",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index c86a62a4..f9c4c416 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -11,6 +11,7 @@ import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import MistralLogo from "@/media/llmprovider/mistral.jpeg";
import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
+import PerplexityLogo from "@/media/llmprovider/perplexity.png";
import ZillizLogo from "@/media/vectordbs/zilliz.png";
import AstraDBLogo from "@/media/vectordbs/astraDB.png";
import ChromaLogo from "@/media/vectordbs/chroma.png";
@@ -109,6 +110,14 @@ const LLM_SELECTION_PRIVACY = {
],
logo: HuggingFaceLogo,
},
+ perplexity: {
+ name: "Perplexity AI",
+ description: [
+ "Your chats will not be used for training",
+ "Your prompts and document text used in response creation are visible to Perplexity AI",
+ ],
+ logo: PerplexityLogo,
+ },
};
const VECTOR_DB_PRIVACY = {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 6970dfa1..296a28d9 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -11,6 +11,7 @@ import TogetherAILogo from "@/media/llmprovider/togetherai.png";
import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
import MistralLogo from "@/media/llmprovider/mistral.jpeg";
import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
+import PerplexityLogo from "@/media/llmprovider/perplexity.png";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
@@ -21,12 +22,13 @@ import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
import MistralOptions from "@/components/LLMSelection/MistralOptions";
import HuggingFaceOptions from "@/components/LLMSelection/HuggingFaceOptions";
+import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
+import PerplexityOptions from "@/components/LLMSelection/PerplexityOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import System from "@/models/system";
import paths from "@/utils/paths";
import showToast from "@/utils/toast";
import { useNavigate } from "react-router-dom";
-import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
const TITLE = "LLM Preference";
const DESCRIPTION =
@@ -128,6 +130,14 @@ export default function LLMPreference({
options: <MistralOptions settings={settings} />,
description: "Run open source models from Mistral AI.",
},
+ {
+ name: "Perplexity AI",
+ value: "perplexity",
+ logo: PerplexityLogo,
+ options: <PerplexityOptions settings={settings} />,
+ description:
+ "Run powerful and internet-connected models hosted by Perplexity AI.",
+ },
{
name: "Native",
value: "native",
diff --git a/server/.env.example b/server/.env.example
index ec6abcac..863486ad 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -41,6 +41,10 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
# TOGETHER_AI_API_KEY='my-together-ai-key'
# TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'
+# LLM_PROVIDER='perplexity'
+# PERPLEXITY_API_KEY='my-perplexity-key'
+# PERPLEXITY_MODEL_PREF='codellama-34b-instruct'
+
# LLM_PROVIDER='mistral'
# MISTRAL_API_KEY='example-mistral-ai-api-key'
# MISTRAL_MODEL_PREF='mistral-tiny'
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index 29949d3d..41544828 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -176,6 +176,18 @@ const SystemSettings = {
             TogetherAiApiKey: !!process.env.TOGETHER_AI_API_KEY,
             TogetherAiModelPref: process.env.TOGETHER_AI_MODEL_PREF,
 
+            // For embedding credentials when ollama is selected.
+            OpenAiKey: !!process.env.OPEN_AI_KEY,
+            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
+            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
+            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
+          }
+        : {}),
+      ...(llmProvider === "perplexity"
+        ? {
+            PerplexityApiKey: !!process.env.PERPLEXITY_API_KEY,
+            PerplexityModelPref: process.env.PERPLEXITY_MODEL_PREF,
+
             // For embedding credentials when ollama is selected.
             OpenAiKey: !!process.env.OPEN_AI_KEY,
             AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
diff --git a/server/utils/AiProviders/perplexity/index.js b/server/utils/AiProviders/perplexity/index.js
new file mode 100644
index 00000000..df20df20
--- /dev/null
+++ b/server/utils/AiProviders/perplexity/index.js
@@ -0,0 +1,204 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const { chatPrompt } = require("../../chats");
+const { handleDefaultStreamResponse } = require("../../helpers/chat/responses");
+
+function perplexityModels() {
+ const { MODELS } = require("./models.js");
+ return MODELS || {};
+}
+
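+// Perplexity exposes an OpenAI-compatible chat completions API, so this provider
+// reuses the openai SDK client pointed at https://api.perplexity.ai.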
+class PerplexityLLM {
+ constructor(embedder = null, modelPreference = null) {
+ const { Configuration, OpenAIApi } = require("openai");
+ if (!process.env.PERPLEXITY_API_KEY)
+ throw new Error("No Perplexity API key was set.");
+
+ const config = new Configuration({
+ basePath: "https://api.perplexity.ai",
+ apiKey: process.env.PERPLEXITY_API_KEY,
+ });
+ this.openai = new OpenAIApi(config);
+ this.model =
+ modelPreference || process.env.PERPLEXITY_MODEL_PREF || "pplx-7b-online"; // Give at least a unique model to the provider as last fallback.
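+    // Reserve ~15% of the context window for chat history, ~15% for the system prompt, and ~70% for the user prompt.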
+ this.limits = {
+ history: this.promptWindowLimit() * 0.15,
+ system: this.promptWindowLimit() * 0.15,
+ user: this.promptWindowLimit() * 0.7,
+ };
+
+ this.embedder = !embedder ? new NativeEmbedder() : embedder;
+ this.defaultTemp = 0.7;
+ }
+
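+  // Wraps each retrieved context snippet in [CONTEXT n]...[END CONTEXT n] markers
+  // before it is appended to the system prompt.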
+ #appendContext(contextTexts = []) {
+ if (!contextTexts || !contextTexts.length) return "";
+ return (
+ "\nContext:\n" +
+ contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")
+ );
+ }
+
+ allModelInformation() {
+ return perplexityModels();
+ }
+
+ streamingEnabled() {
+ return "streamChat" in this && "streamGetChatCompletion" in this;
+ }
+
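+  // Context window of the selected model, falling back to 4096 tokens when the
+  // model is not in the static list.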
+ promptWindowLimit() {
+ const availableModels = this.allModelInformation();
+ return availableModels[this.model]?.maxLength || 4096;
+ }
+
+ async isValidChatCompletionModel(model = "") {
+ const availableModels = this.allModelInformation();
+ return availableModels.hasOwnProperty(model);
+ }
+
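+  // Builds an OpenAI-style message array: system prompt (with appended context),
+  // prior chat history, then the new user prompt.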
+ constructPrompt({
+ systemPrompt = "",
+ contextTexts = [],
+ chatHistory = [],
+ userPrompt = "",
+ }) {
+ const prompt = {
+ role: "system",
+ content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+ };
+ return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+ }
+
+ async isSafe(_input = "") {
+ // Not implemented so must be stubbed
+ return { safe: true, reasons: [] };
+ }
+
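+  // Non-streaming chat completion; streamChat below is the streaming variant of the same request.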
+ async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+ if (!(await this.isValidChatCompletionModel(this.model)))
+ throw new Error(
+ `Perplexity chat: ${this.model} is not valid for chat completion!`
+ );
+
+ const textResponse = await this.openai
+ .createChatCompletion({
+ model: this.model,
+ temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
+ n: 1,
+ messages: await this.compressMessages(
+ {
+ systemPrompt: chatPrompt(workspace),
+ userPrompt: prompt,
+ chatHistory,
+ },
+ rawHistory
+ ),
+ })
+ .then((json) => {
+ const res = json.data;
+ if (!res.hasOwnProperty("choices"))
+ throw new Error("Perplexity chat: No results!");
+ if (res.choices.length === 0)
+ throw new Error("Perplexity chat: No results length!");
+ return res.choices[0].message.content;
+ })
+ .catch((error) => {
+ throw new Error(
+ `Perplexity::createChatCompletion failed with: ${error.message}`
+ );
+ });
+
+ return textResponse;
+ }
+
+ async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+ if (!(await this.isValidChatCompletionModel(this.model)))
+ throw new Error(
+ `Perplexity chat: ${this.model} is not valid for chat completion!`
+ );
+
+ const streamRequest = await this.openai.createChatCompletion(
+ {
+ model: this.model,
+ stream: true,
+ temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
+ n: 1,
+ messages: await this.compressMessages(
+ {
+ systemPrompt: chatPrompt(workspace),
+ userPrompt: prompt,
+ chatHistory,
+ },
+ rawHistory
+ ),
+ },
+ { responseType: "stream" }
+ );
+ return streamRequest;
+ }
+
+ async getChatCompletion(messages = null, { temperature = 0.7 }) {
+ if (!(await this.isValidChatCompletionModel(this.model)))
+ throw new Error(
+ `Perplexity chat: ${this.model} is not valid for chat completion!`
+ );
+
+ const { data } = await this.openai
+ .createChatCompletion({
+ model: this.model,
+ messages,
+ temperature,
+ })
+ .catch((e) => {
+ throw new Error(e.response.data.error.message);
+ });
+
+ if (!data.hasOwnProperty("choices")) return null;
+ return data.choices[0].message.content;
+ }
+
+ async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+ if (!(await this.isValidChatCompletionModel(this.model)))
+ throw new Error(
+ `Perplexity chat: ${this.model} is not valid for chat completion!`
+ );
+
+ const streamRequest = await this.openai.createChatCompletion(
+ {
+ model: this.model,
+ stream: true,
+ messages,
+ temperature,
+ },
+ { responseType: "stream" }
+ );
+ return streamRequest;
+ }
+
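+  // Streamed responses follow the OpenAI chunk format, so the shared default stream handler is reused as-is.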
+ handleStream(response, stream, responseProps) {
+ return handleDefaultStreamResponse(response, stream, responseProps);
+ }
+
+ // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+ async embedTextInput(textInput) {
+ return await this.embedder.embedTextInput(textInput);
+ }
+ async embedChunks(textChunks = []) {
+ return await this.embedder.embedChunks(textChunks);
+ }
+
+ async compressMessages(promptArgs = {}, rawHistory = []) {
+ const { messageArrayCompressor } = require("../../helpers/chat");
+ const messageArray = this.constructPrompt(promptArgs);
+ return await messageArrayCompressor(this, messageArray, rawHistory);
+ }
+}
+
+module.exports = {
+ PerplexityLLM,
+ perplexityModels,
+};
diff --git a/server/utils/AiProviders/perplexity/models.js b/server/utils/AiProviders/perplexity/models.js
new file mode 100644
index 00000000..258cfeac
--- /dev/null
+++ b/server/utils/AiProviders/perplexity/models.js
@@ -0,0 +1,49 @@
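+// Static model list generated by scripts/parse.mjs from the table at
+// https://docs.perplexity.ai/edit/model-cards. Keys are the model ids accepted
+// by PERPLEXITY_MODEL_PREF; maxLength is the model's context window in tokens.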
+const MODELS = {
+ "codellama-34b-instruct": {
+ id: "codellama-34b-instruct",
+ name: "codellama-34b-instruct",
+ maxLength: 16384,
+ },
+ "codellama-70b-instruct": {
+ id: "codellama-70b-instruct",
+ name: "codellama-70b-instruct",
+ maxLength: 16384,
+ },
+ "llama-2-70b-chat": {
+ id: "llama-2-70b-chat",
+ name: "llama-2-70b-chat",
+ maxLength: 4096,
+ },
+ "mistral-7b-instruct": {
+ id: "mistral-7b-instruct",
+ name: "mistral-7b-instruct",
+ maxLength: 8192,
+ },
+ "mixtral-8x7b-instruct": {
+ id: "mixtral-8x7b-instruct",
+ name: "mixtral-8x7b-instruct",
+ maxLength: 8192,
+ },
+ "pplx-7b-chat": {
+ id: "pplx-7b-chat",
+ name: "pplx-7b-chat",
+ maxLength: 8192,
+ },
+ "pplx-70b-chat": {
+ id: "pplx-70b-chat",
+ name: "pplx-70b-chat",
+ maxLength: 8192,
+ },
+ "pplx-7b-online": {
+ id: "pplx-7b-online",
+ name: "pplx-7b-online",
+ maxLength: 8192,
+ },
+ "pplx-70b-online": {
+ id: "pplx-70b-online",
+ name: "pplx-70b-online",
+ maxLength: 8192,
+ },
+};
+
+module.exports.MODELS = MODELS;
diff --git a/server/utils/AiProviders/perplexity/scripts/.gitignore b/server/utils/AiProviders/perplexity/scripts/.gitignore
new file mode 100644
index 00000000..94a2dd14
--- /dev/null
+++ b/server/utils/AiProviders/perplexity/scripts/.gitignore
@@ -0,0 +1 @@
+*.json
\ No newline at end of file
diff --git a/server/utils/AiProviders/perplexity/scripts/chat_models.txt b/server/utils/AiProviders/perplexity/scripts/chat_models.txt
new file mode 100644
index 00000000..83f6d2a8
--- /dev/null
+++ b/server/utils/AiProviders/perplexity/scripts/chat_models.txt
@@ -0,0 +1,11 @@
+| Model | Context Length | Model Type |
+| :------------------------ | :------------- | :-------------- |
+| `codellama-34b-instruct` | 16384 | Chat Completion |
+| `codellama-70b-instruct` | 16384 | Chat Completion |
+| `llama-2-70b-chat` | 4096 | Chat Completion |
+| `mistral-7b-instruct` [2] | 8192 [1] | Chat Completion |
+| `mixtral-8x7b-instruct` | 8192 [1] | Chat Completion |
+| `pplx-7b-chat` | 8192 | Chat Completion |
+| `pplx-70b-chat` | 8192 | Chat Completion |
+| `pplx-7b-online` | 8192 | Chat Completion |
+| `pplx-70b-online` | 8192 | Chat Completion |
\ No newline at end of file
diff --git a/server/utils/AiProviders/perplexity/scripts/parse.mjs b/server/utils/AiProviders/perplexity/scripts/parse.mjs
new file mode 100644
index 00000000..749a63dc
--- /dev/null
+++ b/server/utils/AiProviders/perplexity/scripts/parse.mjs
@@ -0,0 +1,44 @@
+// Perplexity does not provide a REST API for listing models, so we copy
+// the table from their documentation (https://docs.perplexity.ai/edit/model-cards)
+// into chat_models.txt and parse it here into the format ../models.js expects.
+// It is strange that no such endpoint exists, but so be it.
+
+// To run: cd into this directory and run `node parse.mjs`,
+// then copy the output into the export in ../models.js.
+
+// Update the date below whenever you re-run this after Perplexity adds new models.
+// Last Collected: Feb 22, 2024
+
+import fs from "fs";
+
+function parseChatModels() {
+ const models = {};
+ const tableString = fs.readFileSync("chat_models.txt", { encoding: "utf-8" });
+ const rows = tableString.split("\n").slice(2);
+
+ rows.forEach((row) => {
+ let [model, contextLength] = row
+ .split("|")
+ .slice(1, -1)
+ .map((text) => text.trim());
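+    // Strip backticks and footnote markers like "[1]" that appear in the docs table.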
+ model = model.replace(/`|\s*\[\d+\]\s*/g, "");
+ const maxLength = Number(contextLength.replace(/\s*\[\d+\]\s*/g, ""));
+ if (model && maxLength) {
+ models[model] = {
+ id: model,
+ name: model,
+ maxLength: maxLength,
+ };
+ }
+ });
+
+ fs.writeFileSync(
+ "chat_models.json",
+ JSON.stringify(models, null, 2),
+ "utf-8"
+ );
+ return models;
+}
+
+parseChatModels();
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index 53c641e7..8f8ca065 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -1,3 +1,4 @@
+const { perplexityModels } = require("../AiProviders/perplexity");
const { togetherAiModels } = require("../AiProviders/togetherAi");
const SUPPORT_CUSTOM_MODELS = [
"openai",
@@ -6,6 +7,7 @@ const SUPPORT_CUSTOM_MODELS = [
"native-llm",
"togetherai",
"mistral",
+ "perplexity",
];
async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -25,6 +27,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await getMistralModels(apiKey);
case "native-llm":
return nativeLLMModels();
+ case "perplexity":
+ return await getPerplexityModels();
default:
return { models: [], error: "Invalid provider for custom models" };
}
@@ -120,6 +124,20 @@ async function getTogetherAiModels() {
return { models, error: null };
}
+async function getPerplexityModels() {
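+  // Perplexity has no model-listing endpoint, so the "custom models" come from
+  // the static list in AiProviders/perplexity/models.js.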
+ const knownModels = perplexityModels();
+  if (Object.keys(knownModels).length === 0)
+ return { models: [], error: null };
+
+ const models = Object.values(knownModels).map((model) => {
+ return {
+ id: model.id,
+ name: model.name,
+ };
+ });
+ return { models, error: null };
+}
+
async function getMistralModels(apiKey = null) {
const { Configuration, OpenAIApi } = require("openai");
const config = new Configuration({
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 42ed262f..818d92db 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -58,6 +58,9 @@ function getLLMProvider(modelPreference = null) {
case "togetherai":
const { TogetherAiLLM } = require("../AiProviders/togetherAi");
return new TogetherAiLLM(embedder, modelPreference);
+ case "perplexity":
+ const { PerplexityLLM } = require("../AiProviders/perplexity");
+ return new PerplexityLLM(embedder, modelPreference);
case "mistral":
const { MistralLLM } = require("../AiProviders/mistral");
return new MistralLLM(embedder, modelPreference);
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index f89a193f..5a384740 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -239,6 +239,16 @@ const KEY_MAPPING = {
checks: [isNotEmpty],
},
+ // Perplexity Options
+ PerplexityApiKey: {
+ envKey: "PERPLEXITY_API_KEY",
+ checks: [isNotEmpty],
+ },
+ PerplexityModelPref: {
+ envKey: "PERPLEXITY_MODEL_PREF",
+ checks: [isNotEmpty],
+ },
+
// System Settings
AuthToken: {
envKey: "AUTH_TOKEN",
@@ -314,6 +324,7 @@ function supportedLLM(input = "") {
"togetherai",
"mistral",
"huggingface",
+ "perplexity",
].includes(input);
return validSelection ? null : `${input} is not a valid LLM provider.`;
}