Mirror of https://github.com/Mintplex-Labs/anything-llm.git (synced 2024-11-19 12:40:09 +01:00)

Commit 540d18ec84: Merge branch 'master' of github.com:Mintplex-Labs/anything-llm into render
@@ -25,7 +25,7 @@ app.use(
 );

 app.post("/process", async function (request, response) {
-  const { filename } = reqBody(request);
+  const { filename, options = {} } = reqBody(request);
   try {
     const targetFilename = path
       .normalize(filename)
@@ -34,7 +34,7 @@ app.post("/process", async function (request, response) {
       success,
       reason,
       documents = [],
-    } = await processSingleFile(targetFilename);
+    } = await processSingleFile(targetFilename, options);
     response
       .status(200)
       .json({ filename: targetFilename, success, reason, documents });
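For context, a minimal sketch of how a caller might exercise the updated endpoint. The port and filename below are assumptions for illustration, not part of this diff:

// Hypothetical client call: /process now accepts an `options` object
// alongside the filename and forwards it to the file processor.
const res = await fetch("http://localhost:8888/process", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    filename: "recording.mp3", // hypothetical file in the watch directory
    options: { whisperProvider: "openai", openAiKey: process.env.OPEN_AI_KEY },
  }),
});
const { success, reason, documents } = await res.json();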
@@ -33,6 +33,7 @@
     "moment": "^2.29.4",
     "multer": "^1.4.5-lts.1",
     "officeparser": "^4.0.5",
+    "openai": "^3.2.1",
     "pdf-parse": "^1.1.1",
     "puppeteer": "~21.5.2",
     "slugify": "^1.6.6",
@@ -46,4 +47,4 @@
     "nodemon": "^2.0.22",
     "prettier": "^2.4.1"
   }
-}
+}
@@ -1,5 +1,3 @@
-const fs = require("fs");
-const path = require("path");
 const { v4 } = require("uuid");
 const {
   createdDate,
@@ -9,39 +7,35 @@ const {
 const { tokenizeString } = require("../../utils/tokenizer");
 const { default: slugify } = require("slugify");
 const { LocalWhisper } = require("../../utils/WhisperProviders/localWhisper");
+const { OpenAiWhisper } = require("../../utils/WhisperProviders/OpenAiWhisper");

-async function asAudio({ fullFilePath = "", filename = "" }) {
-  const whisper = new LocalWhisper();
+const WHISPER_PROVIDERS = {
+  openai: OpenAiWhisper,
+  local: LocalWhisper,
+};
+
+async function asAudio({ fullFilePath = "", filename = "", options = {} }) {
+  const WhisperProvider = WHISPER_PROVIDERS.hasOwnProperty(
+    options?.whisperProvider
+  )
+    ? WHISPER_PROVIDERS[options?.whisperProvider]
+    : WHISPER_PROVIDERS.local;

   console.log(`-- Working ${filename} --`);
-  const transcriberPromise = new Promise((resolve) =>
-    whisper.client().then((client) => resolve(client))
-  );
-  const audioDataPromise = new Promise((resolve) =>
-    convertToWavAudioData(fullFilePath).then((audioData) => resolve(audioData))
-  );
-  const [audioData, transcriber] = await Promise.all([
-    audioDataPromise,
-    transcriberPromise,
-  ]);
+  const whisper = new WhisperProvider({ options });
+  const { content, error } = await whisper.processFile(fullFilePath, filename);

-  if (!audioData) {
-    console.error(`Failed to parse content from ${filename}.`);
+  if (!!error) {
+    console.error(`Error encountered for parsing of ${filename}.`);
     trashFile(fullFilePath);
     return {
       success: false,
-      reason: `Failed to parse content from ${filename}.`,
+      reason: error,
       documents: [],
     };
   }

-  console.log(`[Model Working]: Transcribing audio data to text`);
-  const { text: content } = await transcriber(audioData, {
-    chunk_length_s: 30,
-    stride_length_s: 5,
-  });
-
-  if (!content.length) {
+  if (!content?.length) {
     console.error(`Resulting text content was empty for ${filename}.`);
     trashFile(fullFilePath);
     return {
@@ -76,79 +70,4 @@ async function asAudio({ fullFilePath = "", filename = "" }) {
   return { success: true, reason: null, documents: [document] };
 }

-async function convertToWavAudioData(sourcePath) {
-  try {
-    let buffer;
-    const wavefile = require("wavefile");
-    const ffmpeg = require("fluent-ffmpeg");
-    const outFolder = path.resolve(__dirname, `../../storage/tmp`);
-    if (!fs.existsSync(outFolder)) fs.mkdirSync(outFolder, { recursive: true });
-
-    const fileExtension = path.extname(sourcePath).toLowerCase();
-    if (fileExtension !== ".wav") {
-      console.log(
-        `[Conversion Required] ${fileExtension} file detected - converting to .wav`
-      );
-      const outputFile = path.resolve(outFolder, `${v4()}.wav`);
-      const convert = new Promise((resolve) => {
-        ffmpeg(sourcePath)
-          .toFormat("wav")
-          .on("error", (error) => {
-            console.error(`[Conversion Error] ${error.message}`);
-            resolve(false);
-          })
-          .on("progress", (progress) =>
-            console.log(
-              `[Conversion Processing]: ${progress.targetSize}KB converted`
-            )
-          )
-          .on("end", () => {
-            console.log("[Conversion Complete]: File converted to .wav!");
-            resolve(true);
-          })
-          .save(outputFile);
-      });
-      const success = await convert;
-      if (!success)
-        throw new Error(
-          "[Conversion Failed]: Could not convert file to .wav format!"
-        );
-
-      const chunks = [];
-      const stream = fs.createReadStream(outputFile);
-      for await (let chunk of stream) chunks.push(chunk);
-      buffer = Buffer.concat(chunks);
-      fs.rmSync(outputFile);
-    } else {
-      const chunks = [];
-      const stream = fs.createReadStream(sourcePath);
-      for await (let chunk of stream) chunks.push(chunk);
-      buffer = Buffer.concat(chunks);
-    }
-
-    const wavFile = new wavefile.WaveFile(buffer);
-    wavFile.toBitDepth("32f");
-    wavFile.toSampleRate(16000);
-
-    let audioData = wavFile.getSamples();
-    if (Array.isArray(audioData)) {
-      if (audioData.length > 1) {
-        const SCALING_FACTOR = Math.sqrt(2);
-
-        // Merge channels into first channel to save memory
-        for (let i = 0; i < audioData[0].length; ++i) {
-          audioData[0][i] =
-            (SCALING_FACTOR * (audioData[0][i] + audioData[1][i])) / 2;
-        }
-      }
-      audioData = audioData[0];
-    }
-
-    return audioData;
-  } catch (error) {
-    console.error(`convertToWavAudioData`, error);
-    return null;
-  }
-}
-
 module.exports = asAudio;
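A minimal usage sketch of the new provider dispatch; the path and key below are placeholders, not from the diff. Unknown provider names fall back to the local model:

// Hypothetical invocation of the refactored asAudio.
const result = await asAudio({
  fullFilePath: "/app/collector/hotdir/meeting.mp3", // placeholder path
  filename: "meeting.mp3",
  options: { whisperProvider: "openai", openAiKey: "sk-..." }, // omit to use the local model
});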
@@ -7,7 +7,7 @@ const {
 const { trashFile, isTextType } = require("../utils/files");
 const RESERVED_FILES = ["__HOTDIR__.md"];

-async function processSingleFile(targetFilename) {
+async function processSingleFile(targetFilename, options = {}) {
   const fullFilePath = path.resolve(WATCH_DIRECTORY, targetFilename);
   if (RESERVED_FILES.includes(targetFilename))
     return {
@@ -54,6 +54,7 @@ async function processSingleFile(targetFilename) {
   return await FileTypeProcessor({
     fullFilePath,
     filename: targetFilename,
+    options,
   });
 }
New file: collector/utils/WhisperProviders/OpenAiWhisper.js (44 lines)
@@ -0,0 +1,44 @@
const fs = require("fs");

class OpenAiWhisper {
  constructor({ options }) {
    const { Configuration, OpenAIApi } = require("openai");
    if (!options.openAiKey) throw new Error("No OpenAI API key was set.");

    const config = new Configuration({
      apiKey: options.openAiKey,
    });
    this.openai = new OpenAIApi(config);
    this.model = "whisper-1";
    this.temperature = 0;
    this.#log("Initialized.");
  }

  #log(text, ...args) {
    console.log(`\x1b[32m[OpenAiWhisper]\x1b[0m ${text}`, ...args);
  }

  async processFile(fullFilePath) {
    return await this.openai
      .createTranscription(
        fs.createReadStream(fullFilePath),
        this.model,
        undefined,
        "text",
        this.temperature
      )
      .then((res) => {
        if (res.hasOwnProperty("data"))
          return { content: res.data, error: null };
        return { content: "", error: "No content was able to be transcribed." };
      })
      .catch((e) => {
        this.#log(`Could not get any response from openai whisper`, e.message);
        return { content: "", error: e.message };
      });
  }
}

module.exports = {
  OpenAiWhisper,
};
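A short sketch of how this class is consumed, following the asAudio changes above; the audio path is a placeholder:

// Hypothetical standalone usage of OpenAiWhisper.
const { OpenAiWhisper } = require("./OpenAiWhisper");
const whisper = new OpenAiWhisper({
  options: { openAiKey: process.env.OPEN_AI_KEY },
});
const { content, error } = await whisper.processFile("/tmp/meeting.mp3"); // placeholder
if (error) console.error(error);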
@@ -1,5 +1,6 @@
-const path = require("path");
 const fs = require("fs");
+const path = require("path");
+const { v4 } = require("uuid");

 class LocalWhisper {
   constructor() {
@@ -16,12 +17,94 @@ class LocalWhisper {
     // Make directory when it does not exist in existing installations
     if (!fs.existsSync(this.cacheDir))
       fs.mkdirSync(this.cacheDir, { recursive: true });
+
+    this.#log("Initialized.");
   }

+  #log(text, ...args) {
+    console.log(`\x1b[32m[LocalWhisper]\x1b[0m ${text}`, ...args);
+  }
+
+  async #convertToWavAudioData(sourcePath) {
+    try {
+      let buffer;
+      const wavefile = require("wavefile");
+      const ffmpeg = require("fluent-ffmpeg");
+      const outFolder = path.resolve(__dirname, `../../storage/tmp`);
+      if (!fs.existsSync(outFolder))
+        fs.mkdirSync(outFolder, { recursive: true });
+
+      const fileExtension = path.extname(sourcePath).toLowerCase();
+      if (fileExtension !== ".wav") {
+        this.#log(
+          `File conversion required! ${fileExtension} file detected - converting to .wav`
+        );
+        const outputFile = path.resolve(outFolder, `${v4()}.wav`);
+        const convert = new Promise((resolve) => {
+          ffmpeg(sourcePath)
+            .toFormat("wav")
+            .on("error", (error) => {
+              this.#log(`Conversion Error! ${error.message}`);
+              resolve(false);
+            })
+            .on("progress", (progress) =>
+              this.#log(
+                `Conversion Processing! ${progress.targetSize}KB converted`
+              )
+            )
+            .on("end", () => {
+              this.#log(`Conversion Complete! File converted to .wav!`);
+              resolve(true);
+            })
+            .save(outputFile);
+        });
+        const success = await convert;
+        if (!success)
+          throw new Error(
+            "[Conversion Failed]: Could not convert file to .wav format!"
+          );
+
+        const chunks = [];
+        const stream = fs.createReadStream(outputFile);
+        for await (let chunk of stream) chunks.push(chunk);
+        buffer = Buffer.concat(chunks);
+        fs.rmSync(outputFile);
+      } else {
+        const chunks = [];
+        const stream = fs.createReadStream(sourcePath);
+        for await (let chunk of stream) chunks.push(chunk);
+        buffer = Buffer.concat(chunks);
+      }
+
+      const wavFile = new wavefile.WaveFile(buffer);
+      wavFile.toBitDepth("32f");
+      wavFile.toSampleRate(16000);
+
+      let audioData = wavFile.getSamples();
+      if (Array.isArray(audioData)) {
+        if (audioData.length > 1) {
+          const SCALING_FACTOR = Math.sqrt(2);
+
+          // Merge channels into first channel to save memory
+          for (let i = 0; i < audioData[0].length; ++i) {
+            audioData[0][i] =
+              (SCALING_FACTOR * (audioData[0][i] + audioData[1][i])) / 2;
+          }
+        }
+        audioData = audioData[0];
+      }
+
+      return audioData;
+    } catch (error) {
+      console.error(`convertToWavAudioData`, error);
+      return null;
+    }
+  }
+
   async client() {
     if (!fs.existsSync(this.modelPath)) {
-      console.log(
-        "\x1b[34m[INFO]\x1b[0m The native whisper model has never been run and will be downloaded right now. Subsequent runs will be faster. (~250MB)\n\n"
+      this.#log(
+        `The native whisper model has never been run and will be downloaded right now. Subsequent runs will be faster. (~250MB)`
       );
     }

@@ -48,10 +131,45 @@ class LocalWhisper {
           : {}),
       });
     } catch (error) {
-      console.error("Failed to load the native whisper model:", error);
+      this.#log("Failed to load the native whisper model:", error);
       throw error;
     }
   }

+  async processFile(fullFilePath, filename) {
+    try {
+      const transcriberPromise = new Promise((resolve) =>
+        this.client().then((client) => resolve(client))
+      );
+      const audioDataPromise = new Promise((resolve) =>
+        this.#convertToWavAudioData(fullFilePath).then((audioData) =>
+          resolve(audioData)
+        )
+      );
+      const [audioData, transcriber] = await Promise.all([
+        audioDataPromise,
+        transcriberPromise,
+      ]);
+
+      if (!audioData) {
+        this.#log(`Failed to parse content from ${filename}.`);
+        return {
+          content: null,
+          error: `Failed to parse content from ${filename}.`,
+        };
+      }
+
+      this.#log(`Transcribing audio data to text...`);
+      const { text } = await transcriber(audioData, {
+        chunk_length_s: 30,
+        stride_length_s: 5,
+      });
+
+      return { content: text, error: null };
+    } catch (error) {
+      return { content: null, error: error.message };
+    }
+  }
 }

 module.exports = {
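LocalWhisper mirrors the same processFile contract as OpenAiWhisper: it loads the cached model and converts the source file to 16kHz mono 32-bit float WAV data in parallel via Promise.all, and the model weights (~250MB) download on the first client() call. A brief usage sketch with a placeholder path:

// Hypothetical standalone usage of LocalWhisper.
const { LocalWhisper } = require("./localWhisper");
const whisper = new LocalWhisper();
const { content, error } = await whisper.processFile("/tmp/lecture.m4a", "lecture.m4a");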
@@ -372,6 +372,13 @@ asynckit@^0.4.0:
   resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
   integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==

+axios@^0.26.0:
+  version "0.26.1"
+  resolved "https://registry.yarnpkg.com/axios/-/axios-0.26.1.tgz#1ede41c51fcf51bbbd6fd43669caaa4f0495aaa9"
+  integrity sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA==
+  dependencies:
+    follow-redirects "^1.14.8"
+
 b4a@^1.6.4:
   version "1.6.4"
   resolved "https://registry.yarnpkg.com/b4a/-/b4a-1.6.4.tgz#ef1c1422cae5ce6535ec191baeed7567443f36c9"
@@ -1203,6 +1210,11 @@ fluent-ffmpeg@^2.1.2:
     async ">=0.2.9"
     which "^1.1.1"

+follow-redirects@^1.14.8:
+  version "1.15.6"
+  resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.6.tgz#7f815c0cda4249c74ff09e95ef97c23b5fd0399b"
+  integrity sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==
+
 form-data-encoder@1.7.2:
   version "1.7.2"
   resolved "https://registry.yarnpkg.com/form-data-encoder/-/form-data-encoder-1.7.2.tgz#1f1ae3dccf58ed4690b86d87e4f57c654fbab040"
@@ -2304,6 +2316,14 @@ onnxruntime-web@1.14.0:
     onnxruntime-common "~1.14.0"
     platform "^1.3.6"

+openai@^3.2.1:
+  version "3.3.0"
+  resolved "https://registry.yarnpkg.com/openai/-/openai-3.3.0.tgz#a6408016ad0945738e1febf43f2fccca83a3f532"
+  integrity sha512-uqxI/Au+aPRnsaQRe8CojU0eCR7I0mBiKjD3sNMzY6DaC1ZVrc85u98mtJW6voDug8fgGN+DIZmTDxTthxb7dQ==
+  dependencies:
+    axios "^0.26.0"
+    form-data "^4.0.0"
+
 openai@^4.19.0:
   version "4.20.1"
   resolved "https://registry.yarnpkg.com/openai/-/openai-4.20.1.tgz#afa0d496d125b5a0f6cebcb4b9aeabf71e00214e"
@@ -131,6 +131,16 @@ GID='1000'
 # ASTRA_DB_APPLICATION_TOKEN=
 # ASTRA_DB_ENDPOINT=

+###########################################
+######## Audio Model Selection ############
+###########################################
+# (default) use built-in whisper-small model.
+# WHISPER_PROVIDER="local"
+
+# use openai hosted whisper model.
+# WHISPER_PROVIDER="openai"
+# OPEN_AI_KEY=sk-xxxxxxxx
+
 # CLOUD DEPLOYMENT VARIABLES ONLY
 # AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting.
 # DISABLE_TELEMETRY="false"
@@ -29,6 +29,9 @@ const GeneralApiKeys = lazy(() => import("@/pages/GeneralSettings/ApiKeys"));
 const GeneralLLMPreference = lazy(
   () => import("@/pages/GeneralSettings/LLMPreference")
 );
+const GeneralTranscriptionPreference = lazy(
+  () => import("@/pages/GeneralSettings/TranscriptionPreference")
+);
 const GeneralEmbeddingPreference = lazy(
   () => import("@/pages/GeneralSettings/EmbeddingPreference")
 );
@@ -47,6 +50,9 @@ const EmbedConfigSetup = lazy(
   () => import("@/pages/GeneralSettings/EmbedConfigs")
 );
 const EmbedChats = lazy(() => import("@/pages/GeneralSettings/EmbedChats"));
+const PrivacyAndData = lazy(
+  () => import("@/pages/GeneralSettings/PrivacyAndData")
+);

 export default function App() {
   return (
@@ -76,6 +82,12 @@ export default function App() {
           path="/settings/llm-preference"
           element={<AdminRoute Component={GeneralLLMPreference} />}
         />
+        <Route
+          path="/settings/transcription-preference"
+          element={
+            <AdminRoute Component={GeneralTranscriptionPreference} />
+          }
+        />
         <Route
           path="/settings/embedding-preference"
           element={<AdminRoute Component={GeneralEmbeddingPreference} />}
@@ -101,6 +113,10 @@ export default function App() {
           path="/settings/security"
           element={<ManagerRoute Component={GeneralSecurity} />}
         />
+        <Route
+          path="/settings/privacy"
+          element={<AdminRoute Component={PrivacyAndData} />}
+        />
         <Route
           path="/settings/appearance"
           element={<ManagerRoute Component={GeneralAppearance} />}
@@ -52,6 +52,7 @@ export default function AnthropicAiOptions({ settings, showAlert = false }) {
   "claude-instant-1.2",
   "claude-2.0",
   "claude-2.1",
+  "claude-3-haiku-20240307",
   "claude-3-opus-20240229",
   "claude-3-sonnet-20240229",
 ].map((model) => {
@@ -13,7 +13,6 @@ export default function FileRow({
   folderName,
   selected,
   toggleSelection,
-  expanded,
   fetchKeys,
   setLoading,
   setLoadingMessage,
@@ -53,12 +52,13 @@ export default function FileRow({

   const handleMouseEnter = debounce(handleShowTooltip, 500);
   const handleMouseLeave = debounce(handleHideTooltip, 500);

   return (
-    <div
+    <tr
       onClick={() => toggleSelection(item)}
-      className={`transition-all duration-200 text-white/80 text-xs grid grid-cols-12 py-2 pl-3.5 pr-8 border-b border-white/20 hover:bg-sky-500/20 cursor-pointer ${`${
-        selected ? "bg-sky-500/20" : ""
-      } ${expanded ? "bg-sky-500/10" : ""}`}`}
+      className={`transition-all duration-200 text-white/80 text-xs grid grid-cols-12 py-2 pl-3.5 pr-8 hover:bg-sky-500/20 cursor-pointer file-row ${
+        selected ? "selected" : ""
+      }`}
     >
       <div className="pl-2 col-span-5 flex gap-x-[4px] items-center">
         <div
@@ -105,6 +105,6 @@ export default function FileRow({
           className="text-base font-bold w-4 h-4 ml-2 flex-shrink-0 cursor-pointer"
         />
       </div>
-    </div>
+    </tr>
   );
 }
@@ -47,10 +47,10 @@ export default function FolderRow({

   return (
     <>
-      <div
+      <tr
         onClick={onRowClick}
-        className={`transition-all duration-200 text-white/80 text-xs grid grid-cols-12 py-2 pl-3.5 pr-8 border-b border-white/20 hover:bg-sky-500/20 cursor-pointer w-full ${
-          selected ? "bg-sky-500/20" : ""
+        className={`transition-all duration-200 text-white/80 text-xs grid grid-cols-12 py-2 pl-3.5 pr-8 bg-[#2C2C2C] hover:bg-sky-500/20 cursor-pointer w-full file-row:0 ${
+          selected ? "selected" : ""
         }`}
       >
         <div className="col-span-6 flex gap-x-[4px] items-center">
@@ -88,7 +88,7 @@ export default function FolderRow({
           />
         )}
       </div>
-      </div>
+      </tr>
       {expanded && (
         <div className="col-span-full">
           {item.items.map((fileItem) => (
@@ -97,7 +97,6 @@ export default function FolderRow({
             item={fileItem}
             folderName={item.name}
             selected={isSelected(fileItem.id)}
-            expanded={expanded}
             toggleSelection={toggleSelection}
             fetchKeys={fetchKeys}
             setLoading={setLoading}
@@ -53,8 +53,8 @@ export default function WorkspaceFileRow({
   const handleMouseLeave = debounce(handleHideTooltip, 500);
   return (
     <div
-      className={`items-center transition-all duration-200 text-white/80 text-xs grid grid-cols-12 py-2 pl-3.5 pr-8 border-b border-white/20 hover:bg-sky-500/20 cursor-pointer
-        ${isMovedItem ? "bg-green-800/40" : ""}`}
+      className={`items-center transition-all duration-200 text-white/80 text-xs grid grid-cols-12 py-2 pl-3.5 pr-8 hover:bg-sky-500/20 cursor-pointer
+        ${isMovedItem ? "bg-green-800/40" : "file-row"}`}
     >
       <div className="col-span-5 flex gap-x-[4px] items-center">
         <File
@@ -29,7 +29,7 @@ function WorkspaceDirectory({
         </h3>
       </div>
       <div className="relative w-[560px] h-[445px] bg-zinc-900 rounded-2xl mt-5">
-        <div className="text-white/80 text-xs grid grid-cols-12 py-2 px-8 border-b border-white/20">
+        <div className="text-white/80 text-xs grid grid-cols-12 py-2 px-8">
           <p className="col-span-5">Name</p>
           <p className="col-span-3">Date</p>
           <p className="col-span-2">Kind</p>
@@ -148,7 +148,7 @@ const PinAlert = memo(() => {
     <ModalWrapper isOpen={showAlert}>
       <div className="relative w-full max-w-2xl max-h-full">
         <div className="relative bg-main-gradient rounded-lg shadow">
-          <div className="flex items-start justify-between p-4 border-b rounded-t border-gray-500/50">
+          <div className="flex items-start justify-between p-4 rounded-t border-gray-500/50">
            <div className="flex items-center gap-2">
              <PushPin className="text-red-600 text-lg w-6 h-6" weight="fill" />
              <h3 className="text-xl font-semibold text-white">
@@ -19,6 +19,8 @@ import {
   Notepad,
   CodeBlock,
   Barcode,
+  ClosedCaptioning,
+  EyeSlash,
 } from "@phosphor-icons/react";
 import useUser from "@/hooks/useUser";
 import { USER_BACKGROUND_COLOR } from "@/utils/constants";
@@ -278,9 +280,17 @@ const SidebarOptions = ({ user = null }) => (
       flex={true}
       allowedRole={["admin"]}
     />
+    <Option
+      href={paths.settings.transcriptionPreference()}
+      btnText="Transcription Model"
+      icon={<ClosedCaptioning className="h-5 w-5 flex-shrink-0" />}
+      user={user}
+      flex={true}
+      allowedRole={["admin"]}
+    />
     <Option
       href={paths.settings.embeddingPreference()}
-      btnText="Embedding Preference"
+      btnText="Embedding Model"
       icon={<FileCode className="h-5 w-5 flex-shrink-0" />}
       user={user}
       flex={true}
@@ -340,5 +350,13 @@ const SidebarOptions = ({ user = null }) => (
       flex={true}
       allowedRole={["admin"]}
     />
+    <Option
+      href={paths.settings.privacy()}
+      btnText="Privacy & Data"
+      icon={<EyeSlash className="h-5 w-5 flex-shrink-0" />}
+      user={user}
+      flex={true}
+      allowedRole={["admin"]}
+    />
   </>
 );
@@ -0,0 +1,38 @@
import { Gauge } from "@phosphor-icons/react";
export default function NativeTranscriptionOptions() {
  return (
    <div className="w-full flex flex-col gap-y-4">
      <div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-4 bg-blue-800/30 w-fit rounded-lg px-4 py-2">
        <div className="gap-x-2 flex items-center">
          <Gauge size={25} />
          <p className="text-sm">
            Using the local whisper model on machines with limited RAM or CPU
            can stall AnythingLLM when processing media files.
            <br />
            We recommend at least 2GB of RAM and upload files &lt;10Mb.
            <br />
            <br />
            <i>
              The built-in model will automatically download on the first use.
            </i>
          </p>
        </div>
      </div>
      <div className="w-full flex items-center gap-4">
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-4">
            Model Selection
          </label>
          <select
            disabled={true}
            className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
          >
            <option disabled={true} selected={true}>
              Xenova/whisper-small
            </option>
          </select>
        </div>
      </div>
    </div>
  );
}
@@ -0,0 +1,41 @@
import { useState } from "react";

export default function OpenAiWhisperOptions({ settings }) {
  const [inputValue, setInputValue] = useState(settings?.OpenAiKey);
  const [_openAIKey, setOpenAIKey] = useState(settings?.OpenAiKey);

  return (
    <div className="flex gap-x-4">
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-4">
          API Key
        </label>
        <input
          type="password"
          name="OpenAiKey"
          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
          placeholder="OpenAI API Key"
          defaultValue={settings?.OpenAiKey ? "*".repeat(20) : ""}
          required={true}
          autoComplete="off"
          spellCheck={false}
          onChange={(e) => setInputValue(e.target.value)}
          onBlur={() => setOpenAIKey(inputValue)}
        />
      </div>
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-4">
          Whisper Model
        </label>
        <select
          disabled={true}
          className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
        >
          <option disabled={true} selected={true}>
            Whisper Large
          </option>
        </select>
      </div>
    </div>
  );
}
@@ -5,8 +5,10 @@ import { useManageWorkspaceModal } from "../../../Modals/MangeWorkspace";
 import ManageWorkspace from "../../../Modals/MangeWorkspace";
 import { ArrowDown } from "@phosphor-icons/react";
 import debounce from "lodash.debounce";
+import useUser from "@/hooks/useUser";

 export default function ChatHistory({ history = [], workspace, sendCommand }) {
+  const { user } = useUser();
   const { showing, showModal, hideModal } = useManageWorkspaceModal();
   const [isAtBottom, setIsAtBottom] = useState(true);
   const chatHistoryRef = useRef(null);
@@ -56,16 +58,22 @@ export default function ChatHistory({ history = [], workspace, sendCommand }) {
         <p className="text-white/60 text-lg font-base py-4">
           Welcome to your new workspace.
         </p>
-        <p className="w-full items-center text-white/60 text-lg font-base flex flex-col md:flex-row gap-x-1">
-          To get started either{" "}
-          <span
-            className="underline font-medium cursor-pointer"
-            onClick={showModal}
-          >
-            upload a document
-          </span>
-          or <b className="font-medium italic">send a chat.</b>
-        </p>
+        {!user || user.role !== "default" ? (
+          <p className="w-full items-center text-white/60 text-lg font-base flex flex-col md:flex-row gap-x-1">
+            To get started either{" "}
+            <span
+              className="underline font-medium cursor-pointer"
+              onClick={showModal}
+            >
+              upload a document
+            </span>
+            or <b className="font-medium italic">send a chat.</b>
+          </p>
+        ) : (
+          <p className="w-full items-center text-white/60 text-lg font-base flex flex-col md:flex-row gap-x-1">
+            To get started <b className="font-medium italic">send a chat.</b>
+          </p>
+        )}
         <WorkspaceChatSuggestions
           suggestions={workspace?.suggestedMessages ?? []}
           sendSuggestion={handleSendSuggestedMessage}
@@ -0,0 +1,50 @@
import { ABORT_STREAM_EVENT } from "@/utils/chat";
import { Tooltip } from "react-tooltip";

export default function StopGenerationButton() {
  function emitHaltEvent() {
    window.dispatchEvent(new CustomEvent(ABORT_STREAM_EVENT));
  }

  return (
    <>
      <button
        type="button"
        onClick={emitHaltEvent}
        data-tooltip-id="stop-generation-button"
        data-tooltip-content="Stop generating response"
        className="border-none text-white/60 cursor-pointer group"
      >
        <svg
          width="28"
          height="28"
          viewBox="0 0 28 28"
          fill="none"
          xmlns="http://www.w3.org/2000/svg"
        >
          <circle
            className="group-hover:stroke-[#46C8FF] stroke-white"
            cx="10"
            cy="10.562"
            r="9"
            stroke-width="2"
          />
          <rect
            className="group-hover:fill-[#46C8FF] fill-white"
            x="6.3999"
            y="6.96204"
            width="7.2"
            height="7.2"
            rx="2"
          />
        </svg>
      </button>
      <Tooltip
        id="stop-generation-button"
        place="bottom"
        delayShow={300}
        className="tooltip !text-xs invert"
      />
    </>
  );
}
New file (253 B):

@@ -0,0 +1,4 @@
<svg width="21" height="21" viewBox="0 0 21 21" fill="none" xmlns="http://www.w3.org/2000/svg">
  <circle cx="10.8984" cy="10.562" r="9" stroke="white" stroke-width="2"/>
  <rect x="7.29846" y="6.96204" width="7.2" height="7.2" rx="2" fill="white"/>
</svg>
@@ -1,4 +1,3 @@
-import { CircleNotch, PaperPlaneRight } from "@phosphor-icons/react";
 import React, { useState, useRef } from "react";
 import SlashCommandsButton, {
   SlashCommands,
@@ -6,6 +5,8 @@ import SlashCommandsButton, {
 } from "./SlashCommands";
 import { isMobile } from "react-device-detect";
 import debounce from "lodash.debounce";
+import { PaperPlaneRight } from "@phosphor-icons/react";
+import StopGenerationButton from "./StopGenerationButton";

 export default function PromptInput({
   workspace,
@@ -83,19 +84,18 @@ export default function PromptInput({
             className="cursor-text max-h-[100px] md:min-h-[40px] mx-2 md:mx-0 py-2 w-full text-[16px] md:text-md text-white bg-transparent placeholder:text-white/60 resize-none active:outline-none focus:outline-none flex-grow"
             placeholder={"Send a message"}
           />
-          <button
-            ref={formRef}
-            type="submit"
-            disabled={buttonDisabled}
-            className="inline-flex justify-center rounded-2xl cursor-pointer text-white/60 hover:text-white group ml-4"
-          >
-            {buttonDisabled ? (
-              <CircleNotch className="w-6 h-6 animate-spin" />
-            ) : (
+          {buttonDisabled ? (
+            <StopGenerationButton />
+          ) : (
+            <button
+              ref={formRef}
+              type="submit"
+              className="inline-flex justify-center rounded-2xl cursor-pointer text-white/60 hover:text-white group ml-4"
+            >
               <PaperPlaneRight className="w-7 h-7 my-3" weight="fill" />
-            )}
-            <span className="sr-only">Send message</span>
-          </button>
+              <span className="sr-only">Send message</span>
+            </button>
+          )}
         </div>
         <div className="flex justify-between py-3.5">
           <div className="flex gap-x-2">
@@ -68,11 +68,7 @@ export default function ChatContainer({ workspace, knownHistory = [] }) {
     const remHistory = chatHistory.length > 0 ? chatHistory.slice(0, -1) : [];
     var _chatHistory = [...remHistory];

-    if (!promptMessage || !promptMessage?.userMessage) {
-      setLoadingResponse(false);
-      return false;
-    }
-
+    if (!promptMessage || !promptMessage?.userMessage) return false;
     if (!!threadSlug) {
       await Workspace.threads.streamChat(
         { workspaceSlug: workspace.slug, threadSlug },
@@ -19,6 +19,7 @@ const PROVIDER_DEFAULT_MODELS = {
     "claude-2.1",
     "claude-3-opus-20240229",
     "claude-3-sonnet-20240229",
+    "claude-3-haiku-20240307",
   ],
   azure: [],
   lmstudio: [],
@@ -597,3 +597,19 @@ dialog::backdrop {
   font-weight: 600;
   color: #fff;
 }
+
+.file-row:nth-child(odd) {
+  @apply bg-[#1C1E21];
+}
+
+.file-row:nth-child(even) {
+  @apply bg-[#2C2C2C];
+}
+
+.file-row.selected:nth-child(odd) {
+  @apply bg-sky-500/20;
+}
+
+.file-row.selected:nth-child(even) {
+  @apply bg-sky-500/10;
+}
@@ -3,6 +3,7 @@ import { baseHeaders } from "@/utils/request";
 import { fetchEventSource } from "@microsoft/fetch-event-source";
 import WorkspaceThread from "@/models/workspaceThread";
 import { v4 } from "uuid";
+import { ABORT_STREAM_EVENT } from "@/utils/chat";

 const Workspace = {
   new: async function (data = {}) {
@@ -75,6 +76,16 @@ const Workspace = {
   },
   streamChat: async function ({ slug }, message, handleChat) {
+    const ctrl = new AbortController();
+
+    // Listen for the ABORT_STREAM_EVENT key to be emitted by the client
+    // to early abort the streaming response. On abort we send a special `stopGeneration`
+    // event to be handled which resets the UI for us to be able to send another message.
+    // The backend response abort handling is done in each LLM's handleStreamResponse.
+    window.addEventListener(ABORT_STREAM_EVENT, () => {
+      ctrl.abort();
+      handleChat({ id: v4(), type: "stopGeneration" });
+    });
+
     await fetchEventSource(`${API_BASE}/workspace/${slug}/stream-chat`, {
       method: "POST",
       body: JSON.stringify({ message }),
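Putting the abort pieces together, a sketch of the flow this diff wires up. Note the controller is presumably passed to fetchEventSource as its request signal; that line is outside the shown hunk, so treat it as an assumption:

// 1. StopGenerationButton dispatches the shared event:
window.dispatchEvent(new CustomEvent(ABORT_STREAM_EVENT));
// 2. streamChat's listener aborts the in-flight request (assumption:
//    ctrl.signal is the fetchEventSource signal) and injects a synthetic event:
ctrl.abort();
handleChat({ id: v4(), type: "stopGeneration" });
// 3. handleChat's "stopGeneration" branch (added below in utils/chat) closes
//    the pending message so the UI can accept a new prompt.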
@@ -1,3 +1,4 @@
+import { ABORT_STREAM_EVENT } from "@/utils/chat";
 import { API_BASE } from "@/utils/constants";
 import { baseHeaders } from "@/utils/request";
 import { fetchEventSource } from "@microsoft/fetch-event-source";
@@ -80,6 +81,16 @@ const WorkspaceThread = {
     handleChat
   ) {
+    const ctrl = new AbortController();
+
+    // Listen for the ABORT_STREAM_EVENT key to be emitted by the client
+    // to early abort the streaming response. On abort we send a special `stopGeneration`
+    // event to be handled which resets the UI for us to be able to send another message.
+    // The backend response abort handling is done in each LLM's handleStreamResponse.
+    window.addEventListener(ABORT_STREAM_EVENT, () => {
+      ctrl.abort();
+      handleChat({ id: v4(), type: "stopGeneration" });
+    });
+
     await fetchEventSource(
       `${API_BASE}/workspace/${workspaceSlug}/thread/${threadSlug}/stream-chat`,
       {
New file: frontend/src/pages/GeneralSettings/PrivacyAndData/index.jsx (206 lines)
@@ -0,0 +1,206 @@
import { useEffect, useState } from "react";
import Sidebar from "@/components/SettingsSidebar";
import { isMobile } from "react-device-detect";
import showToast from "@/utils/toast";
import System from "@/models/system";
import PreLoader from "@/components/Preloader";
import {
  EMBEDDING_ENGINE_PRIVACY,
  LLM_SELECTION_PRIVACY,
  VECTOR_DB_PRIVACY,
} from "@/pages/OnboardingFlow/Steps/DataHandling";

export default function PrivacyAndDataHandling() {
  const [settings, setSettings] = useState({});
  const [loading, setLoading] = useState(true);

  useEffect(() => {
    async function fetchSettings() {
      setLoading(true);
      const settings = await System.keys();
      setSettings(settings);
      setLoading(false);
    }
    fetchSettings();
  }, []);

  return (
    <div className="w-screen h-screen overflow-hidden bg-sidebar flex">
      <Sidebar />
      <div
        style={{ height: isMobile ? "100%" : "calc(100% - 32px)" }}
        className="transition-all duration-500 relative md:ml-[2px] md:mr-[16px] md:my-[16px] md:rounded-[16px] bg-main-gradient w-full h-full overflow-y-scroll border-2 border-outline"
      >
        <div className="flex flex-col w-full px-1 md:px-20 md:py-12 py-16">
          <div className="w-full flex flex-col gap-y-1 pb-6 border-white border-b-2 border-opacity-10">
            <div className="items-center flex gap-x-4">
              <p className="text-2xl font-semibold text-white">
                Privacy & Data-Handling
              </p>
            </div>
            <p className="text-sm font-base text-white text-opacity-60">
              This is your configuration for how connected third party providers
              and AnythingLLM handle your data.
            </p>
          </div>
          {loading ? (
            <div className="h-1/2 transition-all duration-500 relative md:ml-[2px] md:mr-[8px] md:my-[16px] md:rounded-[26px] p-[18px] h-full overflow-y-scroll">
              <div className="w-full h-full flex justify-center items-center">
                <PreLoader />
              </div>
            </div>
          ) : (
            <>
              <ThirdParty settings={settings} />
              <TelemetryLogs settings={settings} />
            </>
          )}
        </div>
      </div>
    </div>
  );
}

function ThirdParty({ settings }) {
  const llmChoice = settings?.LLMProvider || "openai";
  const embeddingEngine = settings?.EmbeddingEngine || "openai";
  const vectorDb = settings?.VectorDB || "pinecone";

  return (
    <div className="py-8 w-full flex items-start justify-center flex-col gap-y-6 border-b-2 border-white/10">
      <div className="flex flex-col gap-8">
        <div className="flex flex-col gap-y-2 border-b border-zinc-500/50 pb-4">
          <div className="text-white text-base font-bold">LLM Selection</div>
          <div className="flex items-center gap-2.5">
            <img
              src={LLM_SELECTION_PRIVACY[llmChoice].logo}
              alt="LLM Logo"
              className="w-8 h-8 rounded"
            />
            <p className="text-white text-sm font-bold">
              {LLM_SELECTION_PRIVACY[llmChoice].name}
            </p>
          </div>
          <ul className="flex flex-col list-disc ml-4">
            {LLM_SELECTION_PRIVACY[llmChoice].description.map((desc) => (
              <li className="text-white/90 text-sm">{desc}</li>
            ))}
          </ul>
        </div>
        <div className="flex flex-col gap-y-2 border-b border-zinc-500/50 pb-4">
          <div className="text-white text-base font-bold">Embedding Engine</div>
          <div className="flex items-center gap-2.5">
            <img
              src={EMBEDDING_ENGINE_PRIVACY[embeddingEngine].logo}
              alt="LLM Logo"
              className="w-8 h-8 rounded"
            />
            <p className="text-white text-sm font-bold">
              {EMBEDDING_ENGINE_PRIVACY[embeddingEngine].name}
            </p>
          </div>
          <ul className="flex flex-col list-disc ml-4">
            {EMBEDDING_ENGINE_PRIVACY[embeddingEngine].description.map(
              (desc) => (
                <li className="text-white/90 text-sm">{desc}</li>
              )
            )}
          </ul>
        </div>

        <div className="flex flex-col gap-y-2 pb-4">
          <div className="text-white text-base font-bold">Vector Database</div>
          <div className="flex items-center gap-2.5">
            <img
              src={VECTOR_DB_PRIVACY[vectorDb].logo}
              alt="LLM Logo"
              className="w-8 h-8 rounded"
            />
            <p className="text-white text-sm font-bold">
              {VECTOR_DB_PRIVACY[vectorDb].name}
            </p>
          </div>
          <ul className="flex flex-col list-disc ml-4">
            {VECTOR_DB_PRIVACY[vectorDb].description.map((desc) => (
              <li className="text-white/90 text-sm">{desc}</li>
            ))}
          </ul>
        </div>
      </div>
    </div>
  );
}

function TelemetryLogs({ settings }) {
  const [telemetry, setTelemetry] = useState(
    settings?.DisableTelemetry !== "true"
  );
  async function toggleTelemetry() {
    await System.updateSystem({
      DisableTelemetry: !telemetry ? "false" : "true",
    });
    setTelemetry(!telemetry);
    showToast(
      `Anonymous Telemetry has been ${!telemetry ? "enabled" : "disabled"}.`,
      "info",
      { clear: true }
    );
  }

  return (
    <div className="relative w-full max-h-full">
      <div className="relative rounded-lg">
        <div className="flex items-start justify-between px-6 py-4"></div>
        <div className="space-y-6 flex h-full w-full">
          <div className="w-full flex flex-col gap-y-4">
            <div className="">
              <label className="mb-2.5 block font-medium text-white">
                Anonymous Telemetry Enabled
              </label>
              <label className="relative inline-flex cursor-pointer items-center">
                <input
                  type="checkbox"
                  onClick={toggleTelemetry}
                  checked={telemetry}
                  className="peer sr-only pointer-events-none"
                />
                <div className="pointer-events-none peer h-6 w-11 rounded-full bg-stone-400 after:absolute after:left-[2px] after:top-[2px] after:h-5 after:w-5 after:rounded-full after:shadow-xl after:border after:border-gray-600 after:bg-white after:box-shadow-md after:transition-all after:content-[''] peer-checked:bg-lime-300 peer-checked:after:translate-x-full peer-checked:after:border-white peer-focus:outline-none peer-focus:ring-4 peer-focus:ring-blue-800"></div>
              </label>
            </div>
          </div>
        </div>
        <div className="flex flex-col items-left space-y-2">
          <p className="text-white/80 text-xs rounded-lg w-96">
            All events do not record IP-address and contain{" "}
            <b>no identifying</b> content, settings, chats, or other non-usage
            based information. To see the list of event tags collected you can
            look on{" "}
            <a
              href="https://github.com/search?q=repo%3AMintplex-Labs%2Fanything-llm%20.sendTelemetry(&type=code"
              className="underline text-blue-400"
              target="_blank"
            >
              Github here
            </a>
            .
          </p>
          <p className="text-white/80 text-xs rounded-lg w-96">
            As an open-source project we respect your right to privacy. We are
            dedicated to building the best solution for integrating AI and
            documents privately and securely. If you do decide to turn off
            telemetry all we ask is to consider sending us feedback and thoughts
            so that we can continue to improve AnythingLLM for you.{" "}
            <a
              href="mailto:team@mintplexlabs.com"
              className="underline text-blue-400"
              target="_blank"
            >
              team@mintplexlabs.com
            </a>
            .
          </p>
        </div>
      </div>
    </div>
  );
}
@@ -0,0 +1,180 @@
import React, { useEffect, useState } from "react";
import { isMobile } from "react-device-detect";
import Sidebar from "@/components/SettingsSidebar";
import System from "@/models/system";
import showToast from "@/utils/toast";
import PreLoader from "@/components/Preloader";

import OpenAiLogo from "@/media/llmprovider/openai.png";
import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
import OpenAiWhisperOptions from "@/components/TranscriptionSelection/OpenAiOptions";
import NativeTranscriptionOptions from "@/components/TranscriptionSelection/NativeTranscriptionOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import { MagnifyingGlass } from "@phosphor-icons/react";

export default function TranscriptionModelPreference() {
  const [saving, setSaving] = useState(false);
  const [hasChanges, setHasChanges] = useState(false);
  const [settings, setSettings] = useState(null);
  const [loading, setLoading] = useState(true);
  const [searchQuery, setSearchQuery] = useState("");
  const [filteredProviders, setFilteredProviders] = useState([]);
  const [selectedProvider, setSelectedProvider] = useState(null);

  const handleSubmit = async (e) => {
    e.preventDefault();
    const form = e.target;
    const data = { WhisperProvider: selectedProvider };
    const formData = new FormData(form);

    for (var [key, value] of formData.entries()) data[key] = value;
    const { error } = await System.updateSystem(data);
    setSaving(true);

    if (error) {
      showToast(`Failed to save preferences: ${error}`, "error");
    } else {
      showToast("Transcription preferences saved successfully.", "success");
    }
    setSaving(false);
    setHasChanges(!!error);
  };

  const updateProviderChoice = (selection) => {
    setSelectedProvider(selection);
    setHasChanges(true);
  };

  useEffect(() => {
    async function fetchKeys() {
      const _settings = await System.keys();
      setSettings(_settings);
      setSelectedProvider(_settings?.WhisperProvider || "local");
      setLoading(false);
    }
    fetchKeys();
  }, []);

  useEffect(() => {
    const filtered = PROVIDERS.filter((provider) =>
      provider.name.toLowerCase().includes(searchQuery.toLowerCase())
    );
    setFilteredProviders(filtered);
  }, [searchQuery, selectedProvider]);

  const PROVIDERS = [
    {
      name: "OpenAI",
      value: "openai",
      logo: OpenAiLogo,
      options: <OpenAiWhisperOptions settings={settings} />,
      description:
        "Leverage the OpenAI Whisper-large model using your API key.",
    },
    {
      name: "AnythingLLM Built-In",
      value: "local",
      logo: AnythingLLMIcon,
      options: <NativeTranscriptionOptions settings={settings} />,
      description: "Run a built-in whisper model on this instance privately.",
    },
  ];

  return (
    <div className="w-screen h-screen overflow-hidden bg-sidebar flex">
      <Sidebar />
      {loading ? (
        <div
          style={{ height: isMobile ? "100%" : "calc(100% - 32px)" }}
          className="relative md:ml-[2px] md:mr-[16px] md:my-[16px] md:rounded-[16px] bg-main-gradient w-full h-full overflow-y-scroll"
        >
          <div className="w-full h-full flex justify-center items-center">
            <PreLoader />
          </div>
        </div>
      ) : (
        <div
          style={{ height: isMobile ? "100%" : "calc(100% - 32px)" }}
          className="relative md:ml-[2px] md:mr-[16px] md:my-[16px] md:rounded-[16px] bg-main-gradient w-full h-full overflow-y-scroll"
        >
          <form onSubmit={handleSubmit} className="flex w-full">
            <div className="flex flex-col w-full px-1 md:pl-6 md:pr-[86px] md:py-6 py-16">
              <div className="w-full flex flex-col gap-y-1 pb-6 border-white border-b-2 border-opacity-10">
                <div className="flex gap-x-4 items-center">
                  <p className="text-lg leading-6 font-bold text-white">
                    Transcription Model Preference
                  </p>
                  {hasChanges && (
                    <button
                      type="submit"
                      disabled={saving}
                      className="flex items-center gap-x-2 px-4 py-2 rounded-lg bg-[#2C2F36] text-white text-sm hover:bg-[#3D4147] shadow-md border border-[#3D4147]"
                    >
                      {saving ? "Saving..." : "Save changes"}
                    </button>
                  )}
                </div>
                <p className="text-xs leading-[18px] font-base text-white text-opacity-60">
                  These are the credentials and settings for your preferred
                  transcription model provider. It's important these keys are
                  current and correct or else media files and audio will not
                  transcribe.
                </p>
              </div>
              <div className="text-sm font-medium text-white mt-6 mb-4">
                Transcription Providers
              </div>
              <div className="w-full">
                <div className="w-full relative border-slate-300/20 shadow border-4 rounded-xl text-white">
                  <div className="w-full p-4 absolute top-0 rounded-t-lg backdrop-blur-sm">
                    <div className="w-full flex items-center sticky top-0">
                      <MagnifyingGlass
                        size={16}
                        weight="bold"
                        className="absolute left-4 z-30 text-white"
                      />
                      <input
                        type="text"
                        placeholder="Search audio transcription providers"
                        className="bg-zinc-600 z-20 pl-10 h-[38px] rounded-full w-full px-4 py-1 text-sm border-2 border-slate-300/40 outline-none focus:border-white text-white"
                        onChange={(e) => setSearchQuery(e.target.value)}
                        autoComplete="off"
                        onKeyDown={(e) => {
                          if (e.key === "Enter") e.preventDefault();
                        }}
                      />
                    </div>
                  </div>
                  <div className="px-4 pt-[70px] flex flex-col gap-y-1 max-h-[390px] overflow-y-auto no-scroll pb-4">
                    {filteredProviders.map((provider) => {
                      return (
                        <LLMItem
                          key={provider.name}
                          name={provider.name}
                          value={provider.value}
                          image={provider.logo}
                          description={provider.description}
                          checked={selectedProvider === provider.value}
                          onClick={() => updateProviderChoice(provider.value)}
                        />
                      );
                    })}
                  </div>
                </div>
                <div
                  onChange={() => setHasChanges(true)}
                  className="mt-4 flex flex-col gap-y-1"
                >
                  {selectedProvider &&
                    PROVIDERS.find(
                      (provider) => provider.value === selectedProvider
                    )?.options}
                </div>
              </div>
            </div>
          </form>
        </div>
      )}
    </div>
  );
}
@@ -29,7 +29,7 @@ import { useNavigate } from "react-router-dom";
 const TITLE = "Data Handling & Privacy";
 const DESCRIPTION =
   "We are committed to transparency and control when it comes to your personal data.";
-const LLM_SELECTION_PRIVACY = {
+export const LLM_SELECTION_PRIVACY = {
   openai: {
     name: "OpenAI",
     description: [
@@ -138,7 +138,7 @@ const LLM_SELECTION_PRIVACY = {
   },
 };

-const VECTOR_DB_PRIVACY = {
+export const VECTOR_DB_PRIVACY = {
   chroma: {
     name: "Chroma",
     description: [
@@ -199,7 +199,7 @@ const VECTOR_DB_PRIVACY = {
   },
 };

-const EMBEDDING_ENGINE_PRIVACY = {
+export const EMBEDDING_ENGINE_PRIVACY = {
   native: {
     name: "AnythingLLM Embedder",
     description: [
@@ -1,3 +1,5 @@
+export const ABORT_STREAM_EVENT = "abort-chat-stream";
+
 // For handling of chat responses in the frontend by their various types.
 export default function handleChat(
   chatResult,
@@ -108,6 +110,22 @@ export default function handleChat(
       _chatHistory[chatIdx] = updatedHistory;
     }
     setChatHistory([..._chatHistory]);
     setLoadingResponse(false);
+  } else if (type === "stopGeneration") {
+    const chatIdx = _chatHistory.length - 1;
+    const existingHistory = { ..._chatHistory[chatIdx] };
+    const updatedHistory = {
+      ...existingHistory,
+      sources: [],
+      closed: true,
+      error: null,
+      animate: false,
+      pending: false,
+    };
+    _chatHistory[chatIdx] = updatedHistory;
+
+    setChatHistory([..._chatHistory]);
+    setLoadingResponse(false);
+  }
 }
@@ -92,6 +92,9 @@ export default {
   llmPreference: () => {
     return "/settings/llm-preference";
   },
+  transcriptionPreference: () => {
+    return "/settings/transcription-preference";
+  },
   embeddingPreference: () => {
     return "/settings/embedding-preference";
   },
@@ -110,6 +113,9 @@ export default {
   logs: () => {
     return "/settings/event-logs";
   },
+  privacy: () => {
+    return "/settings/privacy";
+  },
   embedSetup: () => {
     return `/settings/embed-config`;
   },
@@ -128,6 +128,16 @@ VECTOR_DB="lancedb"
 # ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com"
 # ZILLIZ_API_TOKEN=api-token-here

+###########################################
+######## Audio Model Selection ############
+###########################################
+# (default) use built-in whisper-small model.
+WHISPER_PROVIDER="local"
+
+# use openai hosted whisper model.
+# WHISPER_PROVIDER="openai"
+# OPEN_AI_KEY=sk-xxxxxxxx
+
 # CLOUD DEPLOYMENT VARIABLES ONLY
 # AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting.
 # STORAGE_DIR= # absolute filesystem path with no trailing slash
@@ -47,6 +47,7 @@ const SystemSettings = {
       EmbeddingModelMaxChunkLength:
         process.env.EMBEDDING_MODEL_MAX_CHUNK_LENGTH,
       LocalAiApiKey: !!process.env.LOCAL_AI_API_KEY,
+      DisableTelemetry: process.env.DISABLE_TELEMETRY || "false",
       ...(vectorDB === "pinecone"
         ? {
             PineConeKey: !!process.env.PINECONE_API_KEY,
@@ -262,6 +263,7 @@ const SystemSettings = {
             AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
           }
         : {}),
+      WhisperProvider: process.env.WHISPER_PROVIDER || "local",
    };
  },
@@ -14,6 +14,9 @@ AnythingLLM allows you to upload various audio and video formats as source documents

 Once transcribed you can embed these transcriptions into your workspace like you would any other file!

+**Other external model/transcription providers are also live.**
+- [OpenAI Whisper via API key.](https://openai.com/research/whisper)
+
 ## Text generation (LLM selection)
 > [!IMPORTANT]
 > Use of a locally running LLM model is **experimental** and may behave unexpectedly, crash, or not function at all.
@@ -1,6 +1,9 @@
 const { v4 } = require("uuid");
 const { chatPrompt } = require("../../chats");
-const { writeResponseChunk } = require("../../helpers/chat/responses");
+const {
+  writeResponseChunk,
+  clientAbortedHandler,
+} = require("../../helpers/chat/responses");
 class AnthropicLLM {
   constructor(embedder = null, modelPreference = null) {
     if (!process.env.ANTHROPIC_API_KEY)
@@ -45,6 +48,8 @@ class AnthropicLLM {
         return 200_000;
       case "claude-3-sonnet-20240229":
         return 200_000;
+      case "claude-3-haiku-20240307":
+        return 200_000;
       default:
         return 100_000; // assume a claude-instant-1.2 model
     }
@@ -57,6 +62,7 @@ class AnthropicLLM {
       "claude-2.1",
       "claude-3-opus-20240229",
       "claude-3-sonnet-20240229",
+      "claude-3-haiku-20240307",
     ];
     return validModels.includes(modelName);
   }
@@ -150,6 +156,13 @@ class AnthropicLLM {
     let fullText = "";
     const { uuid = v4(), sources = [] } = responseProps;

+    // Establish listener to early-abort a streaming response
+    // in case things go sideways or the user does not like the response.
+    // We preserve the generated text but continue as if chat was completed
+    // to preserve previously generated content.
+    const handleAbort = () => clientAbortedHandler(resolve, fullText);
+    response.on("close", handleAbort);
+
     stream.on("streamEvent", (message) => {
       const data = message;
       if (
@@ -181,6 +194,7 @@ class AnthropicLLM {
         close: true,
         error: false,
       });
+      response.removeListener("close", handleAbort);
       resolve(fullText);
     }
   });
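The clientAbortedHandler helper itself is not shown in this diff. Based on the comment's contract (preserve generated text, resolve as if the chat completed), a plausible shape — an assumption, not the repo's actual implementation:

// Assumed sketch of clientAbortedHandler from helpers/chat/responses:
// resolves the streaming promise with whatever text was generated so far.
function clientAbortedHandler(resolve, fullText) {
  console.log("Client aborted the stream response."); // hypothetical log message
  resolve(fullText);
}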
@@ -1,6 +1,9 @@
 const { AzureOpenAiEmbedder } = require("../../EmbeddingEngines/azureOpenAi");
 const { chatPrompt } = require("../../chats");
-const { writeResponseChunk } = require("../../helpers/chat/responses");
+const {
+  writeResponseChunk,
+  clientAbortedHandler,
+} = require("../../helpers/chat/responses");

 class AzureOpenAiLLM {
   constructor(embedder = null, _modelPreference = null) {
@@ -174,6 +177,14 @@ class AzureOpenAiLLM {

     return new Promise(async (resolve) => {
       let fullText = "";
+
+      // Establish listener to early-abort a streaming response
+      // in case things go sideways or the user does not like the response.
+      // We preserve the generated text but continue as if chat was completed
+      // to preserve previously generated content.
+      const handleAbort = () => clientAbortedHandler(resolve, fullText);
+      response.on("close", handleAbort);
+
       for await (const event of stream) {
         for (const choice of event.choices) {
           const delta = choice.delta?.content;
@@ -198,6 +209,7 @@ class AzureOpenAiLLM {
         close: true,
         error: false,
       });
+      response.removeListener("close", handleAbort);
       resolve(fullText);
     });
   }
@ -1,5 +1,8 @@
const { chatPrompt } = require("../../chats");
const { writeResponseChunk } = require("../../helpers/chat/responses");
const {
  writeResponseChunk,
  clientAbortedHandler,
} = require("../../helpers/chat/responses");

class GeminiLLM {
  constructor(embedder = null, modelPreference = null) {
@ -198,6 +201,14 @@ class GeminiLLM {

    return new Promise(async (resolve) => {
      let fullText = "";

      // Establish listener to early-abort a streaming response
      // in case things go sideways or the user does not like the response.
      // We preserve the generated text but continue as if chat was completed
      // to preserve previously generated content.
      const handleAbort = () => clientAbortedHandler(resolve, fullText);
      response.on("close", handleAbort);

      for await (const chunk of stream) {
        fullText += chunk.text();
        writeResponseChunk(response, {
@ -218,6 +229,7 @@ class GeminiLLM {
        close: true,
        error: false,
      });
      response.removeListener("close", handleAbort);
      resolve(fullText);
    });
  }

@ -1,7 +1,10 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
const { chatPrompt } = require("../../chats");
const { writeResponseChunk } = require("../../helpers/chat/responses");
const {
  writeResponseChunk,
  clientAbortedHandler,
} = require("../../helpers/chat/responses");

class HuggingFaceLLM {
  constructor(embedder = null, _modelPreference = null) {
@ -172,6 +175,14 @@ class HuggingFaceLLM {
    return new Promise((resolve) => {
      let fullText = "";
      let chunk = "";

      // Establish listener to early-abort a streaming response
      // in case things go sideways or the user does not like the response.
      // We preserve the generated text but continue as if chat was completed
      // to preserve previously generated content.
      const handleAbort = () => clientAbortedHandler(resolve, fullText);
      response.on("close", handleAbort);

      stream.data.on("data", (data) => {
        const lines = data
          ?.toString()
@ -218,6 +229,7 @@ class HuggingFaceLLM {
            close: true,
            error: false,
          });
          response.removeListener("close", handleAbort);
          resolve(fullText);
        } else {
          let error = null;
@ -241,6 +253,7 @@ class HuggingFaceLLM {
            close: true,
            error,
          });
          response.removeListener("close", handleAbort);
          resolve("");
          return;
        }
@ -266,6 +279,7 @@ class HuggingFaceLLM {
            close: true,
            error: false,
          });
          response.removeListener("close", handleAbort);
          resolve(fullText);
        }
      }

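The HuggingFace handler above (like the TogetherAI and default handlers later in this diff) consumes an axios response stream and parses server-sent-event lines by hand. The hunk truncates the parsing chain after `?.toString()`; the sketch below shows the general shape of this pattern under that assumption, and is not the repository's exact code:

```js
// Hedged sketch of the SSE-style parsing these axios-stream handlers perform;
// the exact split/filter chain sits outside the hunk's context lines.
function attachSSEParser(stream, onDelta, onDone) {
  stream.data.on("data", (data) => {
    const lines = data
      ?.toString()
      ?.split("\n")
      .filter((line) => line.trim() !== "");

    for (const line of lines) {
      const message = line.replace(/^data: /, "");
      if (message === "[DONE]") return onDone(); // provider end-of-stream marker
      onDelta(JSON.parse(message)); // one streamed completion delta
    }
  });
}
```
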
@ -2,7 +2,10 @@ const fs = require("fs");
const path = require("path");
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { chatPrompt } = require("../../chats");
const { writeResponseChunk } = require("../../helpers/chat/responses");
const {
  writeResponseChunk,
  clientAbortedHandler,
} = require("../../helpers/chat/responses");

// Docs: https://api.js.langchain.com/classes/chat_models_llama_cpp.ChatLlamaCpp.html
const ChatLlamaCpp = (...args) =>
@ -176,6 +179,14 @@ class NativeLLM {

    return new Promise(async (resolve) => {
      let fullText = "";

      // Establish listener to early-abort a streaming response
      // in case things go sideways or the user does not like the response.
      // We preserve the generated text but continue as if chat was completed
      // to preserve previously generated content.
      const handleAbort = () => clientAbortedHandler(resolve, fullText);
      response.on("close", handleAbort);

      for await (const chunk of stream) {
        if (chunk === undefined)
          throw new Error(
@ -202,6 +213,7 @@ class NativeLLM {
        close: true,
        error: false,
      });
      response.removeListener("close", handleAbort);
      resolve(fullText);
    });
  }

@ -1,6 +1,9 @@
const { chatPrompt } = require("../../chats");
const { StringOutputParser } = require("langchain/schema/output_parser");
const { writeResponseChunk } = require("../../helpers/chat/responses");
const {
  writeResponseChunk,
  clientAbortedHandler,
} = require("../../helpers/chat/responses");

// Docs: https://github.com/jmorganca/ollama/blob/main/docs/api.md
class OllamaAILLM {
@ -180,8 +183,16 @@ class OllamaAILLM {
    const { uuid = uuidv4(), sources = [] } = responseProps;

    return new Promise(async (resolve) => {
      let fullText = "";

      // Establish listener to early-abort a streaming response
      // in case things go sideways or the user does not like the response.
      // We preserve the generated text but continue as if chat was completed
      // to preserve previously generated content.
      const handleAbort = () => clientAbortedHandler(resolve, fullText);
      response.on("close", handleAbort);

      try {
        let fullText = "";
        for await (const chunk of stream) {
          if (chunk === undefined)
            throw new Error(
@ -210,6 +221,7 @@ class OllamaAILLM {
          close: true,
          error: false,
        });
        response.removeListener("close", handleAbort);
        resolve(fullText);
      } catch (error) {
        writeResponseChunk(response, {
@ -222,6 +234,7 @@ class OllamaAILLM {
            error?.cause ?? error.message
          }`,
        });
        response.removeListener("close", handleAbort);
      }
    });
  }

@ -1,7 +1,10 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { chatPrompt } = require("../../chats");
const { v4: uuidv4 } = require("uuid");
const { writeResponseChunk } = require("../../helpers/chat/responses");
const {
  writeResponseChunk,
  clientAbortedHandler,
} = require("../../helpers/chat/responses");

function openRouterModels() {
  const { MODELS } = require("./models.js");
@ -195,6 +198,13 @@ class OpenRouterLLM {
      let chunk = "";
      let lastChunkTime = null; // null when first token is still not received.

      // Establish listener to early-abort a streaming response
      // in case things go sideways or the user does not like the response.
      // We preserve the generated text but continue as if chat was completed
      // to preserve previously generated content.
      const handleAbort = () => clientAbortedHandler(resolve, fullText);
      response.on("close", handleAbort);

      // NOTICE: Not all OpenRouter models will return a stop reason
      // which keeps the connection open and so the model never finalizes the stream
      // like the traditional OpenAI response schema does. So in the case the response stream
@ -220,6 +230,7 @@ class OpenRouterLLM {
          error: false,
        });
        clearInterval(timeoutCheck);
        response.removeListener("close", handleAbort);
        resolve(fullText);
      }
    }, 500);
@ -269,6 +280,7 @@ class OpenRouterLLM {
          error: false,
        });
        clearInterval(timeoutCheck);
        response.removeListener("close", handleAbort);
        resolve(fullText);
      } else {
        let finishReason = null;
@ -305,6 +317,7 @@ class OpenRouterLLM {
          error: false,
        });
        clearInterval(timeoutCheck);
        response.removeListener("close", handleAbort);
        resolve(fullText);
      }
    }

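The truncated NOTICE above is worth unpacking: because some OpenRouter models never send a stop reason, the handler resolves the stream itself once tokens stop arriving, polling every 500ms. A self-contained sketch of that watchdog, with an assumed timeout value (the real cutoff sits outside the diff's context lines, and the function/parameter names here are illustrative):

```js
// Watchdog sketch: finalize the stream if no chunk has arrived for `timeoutMs`.
// The 500ms poll interval comes from the diff; the 30s cutoff is an assumption.
function startStreamWatchdog(finalize, getLastChunkTime, timeoutMs = 30_000) {
  const timeoutCheck = setInterval(() => {
    const last = getLastChunkTime();
    if (last === null) return; // first token not yet received
    if (Number(new Date()) - last > timeoutMs) {
      clearInterval(timeoutCheck);
      finalize(); // close out the response as if a stop reason had arrived
    }
  }, 500);
  return timeoutCheck;
}
```
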
@ -1,5 +1,8 @@
const { chatPrompt } = require("../../chats");
const { writeResponseChunk } = require("../../helpers/chat/responses");
const {
  writeResponseChunk,
  clientAbortedHandler,
} = require("../../helpers/chat/responses");

function togetherAiModels() {
  const { MODELS } = require("./models.js");
@ -185,6 +188,14 @@ class TogetherAiLLM {
    return new Promise((resolve) => {
      let fullText = "";
      let chunk = "";

      // Establish listener to early-abort a streaming response
      // in case things go sideways or the user does not like the response.
      // We preserve the generated text but continue as if chat was completed
      // to preserve previously generated content.
      const handleAbort = () => clientAbortedHandler(resolve, fullText);
      response.on("close", handleAbort);

      stream.data.on("data", (data) => {
        const lines = data
          ?.toString()
@ -230,6 +241,7 @@ class TogetherAiLLM {
            close: true,
            error: false,
          });
          response.removeListener("close", handleAbort);
          resolve(fullText);
        } else {
          let finishReason = null;
@ -263,6 +275,7 @@ class TogetherAiLLM {
            close: true,
            error: false,
          });
          response.removeListener("close", handleAbort);
          resolve(fullText);
        }
      }

@ -5,13 +5,20 @@

class CollectorApi {
  constructor() {
    this.endpoint = "http://0.0.0.0:8888";
    this.endpoint = `http://0.0.0.0:${process.env.COLLECTOR_PORT || 8888}`;
  }

  log(text, ...args) {
    console.log(`\x1b[36m[CollectorApi]\x1b[0m ${text}`, ...args);
  }

  #attachOptions() {
    return {
      whisperProvider: process.env.WHISPER_PROVIDER || "local",
      openAiKey: process.env.OPEN_AI_KEY || null,
    };
  }

  async online() {
    return await fetch(this.endpoint)
      .then((res) => res.ok)
@ -38,7 +45,10 @@ class CollectorApi {
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({ filename }),
      body: JSON.stringify({
        filename,
        options: this.#attachOptions(),
      }),
    })
      .then((res) => {
        if (!res.ok) throw new Error("Response could not be completed");

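Putting the `CollectorApi` changes together, the collector's `/process` endpoint now receives a body like the following. The filename is a hypothetical upload; the endpoint and option defaults come from the constructor and `#attachOptions` above:

```js
// Illustrative request the server now makes to the collector; values are
// placeholders, not the repository's exact call site.
fetch(`http://0.0.0.0:${process.env.COLLECTOR_PORT || 8888}/process`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    filename: "my-recording.mp3", // hypothetical file
    options: {
      whisperProvider: process.env.WHISPER_PROVIDER || "local",
      openAiKey: process.env.OPEN_AI_KEY || null,
    },
  }),
});
```
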
@ -1,6 +1,14 @@
const { v4: uuidv4 } = require("uuid");
const moment = require("moment");

function clientAbortedHandler(resolve, fullText) {
  console.log(
    "\x1b[43m\x1b[34m[STREAM ABORTED]\x1b[0m Client requested to abort stream. Exiting LLM stream handler early."
  );
  resolve(fullText);
  return;
}

// The default way to handle a stream response. Functions best with OpenAI.
// Currently used for LMStudio, LocalAI, Mistral API, and OpenAI
function handleDefaultStreamResponse(response, stream, responseProps) {
@ -9,6 +17,14 @@ function handleDefaultStreamResponse(response, stream, responseProps) {
  return new Promise((resolve) => {
    let fullText = "";
    let chunk = "";

    // Establish listener to early-abort a streaming response
    // in case things go sideways or the user does not like the response.
    // We preserve the generated text but continue as if chat was completed
    // to preserve previously generated content.
    const handleAbort = () => clientAbortedHandler(resolve, fullText);
    response.on("close", handleAbort);

    stream.data.on("data", (data) => {
      const lines = data
        ?.toString()
@ -52,6 +68,7 @@ function handleDefaultStreamResponse(response, stream, responseProps) {
        close: true,
        error: false,
      });
      response.removeListener("close", handleAbort);
      resolve(fullText);
    } else {
      let finishReason = null;
@ -85,6 +102,7 @@ function handleDefaultStreamResponse(response, stream, responseProps) {
        close: true,
        error: false,
      });
      response.removeListener("close", handleAbort);
      resolve(fullText);
    }
  }
@ -141,4 +159,5 @@ module.exports = {
  convertToChatHistory,
  convertToPromptHistory,
  writeResponseChunk,
  clientAbortedHandler,
};

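Since every provider in this diff wires up the same abort pattern, here is a condensed, self-contained sketch of the contract `clientAbortedHandler` establishes. The `tokenStream` object and the require path are stand-ins (in the repo the helpers live under `helpers/chat/responses`):

```js
const { clientAbortedHandler } = require("./responses"); // path is illustrative

function streamToClient(response, tokenStream) {
  return new Promise((resolve) => {
    let fullText = "";
    // If the client disconnects mid-stream, resolve with the partial text
    // so previously generated content is preserved.
    const handleAbort = () => clientAbortedHandler(resolve, fullText);
    response.on("close", handleAbort);

    tokenStream.on("data", (token) => (fullText += token));
    tokenStream.on("end", () => {
      // Normal completion: drop the listener so it cannot fire afterwards.
      response.removeListener("close", handleAbort);
      resolve(fullText);
    });
  });
}
```
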
@ -269,6 +269,13 @@ const KEY_MAPPING = {
    checks: [isNotEmpty],
  },

  // Whisper (transcription) providers
  WhisperProvider: {
    envKey: "WHISPER_PROVIDER",
    checks: [isNotEmpty, supportedTranscriptionProvider],
    postUpdate: [],
  },

  // System Settings
  AuthToken: {
    envKey: "AUTH_TOKEN",
@ -278,6 +285,10 @@ const KEY_MAPPING = {
    envKey: "JWT_SECRET",
    checks: [requiresForceMode],
  },
  DisableTelemetry: {
    envKey: "DISABLE_TELEMETRY",
    checks: [],
  },
};

function isNotEmpty(input = "") {
@ -351,6 +362,13 @@ function supportedLLM(input = "") {
  return validSelection ? null : `${input} is not a valid LLM provider.`;
}

function supportedTranscriptionProvider(input = "") {
  const validSelection = ["openai", "local"].includes(input);
  return validSelection
    ? null
    : `${input} is not a valid transcription model provider.`;
}

function validGeminiModel(input = "") {
  const validModels = ["gemini-pro"];
  return validModels.includes(input)
@ -365,6 +383,7 @@ function validAnthropicModel(input = "") {
    "claude-2.1",
    "claude-3-opus-20240229",
    "claude-3-sonnet-20240229",
    "claude-3-haiku-20240307",
  ];
  return validModels.includes(input)
    ? null
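The new validator follows the contract the other `KEY_MAPPING` checks use: `null` means the value is acceptable, and a string is the error message surfaced to the caller. A quick demonstration, with the function copied from the diff above:

```js
function supportedTranscriptionProvider(input = "") {
  const validSelection = ["openai", "local"].includes(input);
  return validSelection
    ? null
    : `${input} is not a valid transcription model provider.`;
}

console.log(supportedTranscriptionProvider("local"));  // null (valid)
console.log(supportedTranscriptionProvider("openai")); // null (valid)
console.log(supportedTranscriptionProvider("whisperx"));
// "whisperx is not a valid transcription model provider."
```
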