From fc77b468006749af434a123625ee3a8b4a78fb45 Mon Sep 17 00:00:00 2001
From: Sean Hatfield
Date: Thu, 2 May 2024 12:12:44 -0700
Subject: [PATCH 1/4] [FEAT] KoboldCPP LLM Support (#1268)
* koboldcpp LLM support
* update .env.examples for koboldcpp support
* update LLM preference order
* update koboldcpp comments
---------
Co-authored-by: timothycarambat
---
docker/.env.example | 5 +
.../LLMSelection/KoboldCPPOptions/index.jsx | 112 +++++++++++
frontend/src/media/llmprovider/koboldcpp.png | Bin 0 -> 7110 bytes
.../GeneralSettings/LLMPreference/index.jsx | 14 ++
.../Steps/DataHandling/index.jsx | 8 +
.../Steps/LLMPreference/index.jsx | 9 +
server/.env.example | 5 +
server/models/systemSettings.js | 5 +
server/utils/AiProviders/koboldCPP/index.js | 180 ++++++++++++++++++
server/utils/helpers/customModels.js | 25 +++
server/utils/helpers/index.js | 3 +
server/utils/helpers/updateENV.js | 15 ++
12 files changed, 381 insertions(+)
create mode 100644 frontend/src/components/LLMSelection/KoboldCPPOptions/index.jsx
create mode 100644 frontend/src/media/llmprovider/koboldcpp.png
create mode 100644 server/utils/AiProviders/koboldCPP/index.js
diff --git a/docker/.env.example b/docker/.env.example
index 20120b5b..e10ace02 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -66,6 +66,11 @@ GID='1000'
# GROQ_API_KEY=gsk_abcxyz
# GROQ_MODEL_PREF=llama3-8b-8192
+# LLM_PROVIDER='koboldcpp'
+# KOBOLD_CPP_BASE_PATH='http://127.0.0.1:5000/v1'
+# KOBOLD_CPP_MODEL_PREF='koboldcpp/codellama-7b-instruct.Q4_K_S'
+# KOBOLD_CPP_MODEL_TOKEN_LIMIT=4096
+
# LLM_PROVIDER='generic-openai'
# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
# GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
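
KoboldCPP serves an OpenAI-compatible API at the base path configured above, so it is worth confirming the endpoint responds before pointing AnythingLLM at it. A minimal sketch, assuming Node 18+ for global fetch and the example URL above; /models is the same route the server uses to list models:

    // Sanity-check the KoboldCPP endpoint before enabling the provider.
    const base = process.env.KOBOLD_CPP_BASE_PATH || "http://127.0.0.1:5000/v1";

    fetch(`${base}/models`)
      .then((res) => res.json())
      // OpenAI-style list response: { data: [{ id, ... }] }
      .then(({ data }) => console.log("models:", data.map((m) => m.id)))
      .catch((e) => console.error("KoboldCPP unreachable:", e.message));
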
diff --git a/frontend/src/components/LLMSelection/KoboldCPPOptions/index.jsx b/frontend/src/components/LLMSelection/KoboldCPPOptions/index.jsx
new file mode 100644
index 00000000..7e5e20ae
--- /dev/null
+++ b/frontend/src/components/LLMSelection/KoboldCPPOptions/index.jsx
@@ -0,0 +1,112 @@
+import { useState, useEffect } from "react";
+import System from "@/models/system";
+
+export default function KoboldCPPOptions({ settings }) {
+  const [basePathValue, setBasePathValue] = useState(
+    settings?.KoboldCPPBasePath
+  );
+  const [basePath, setBasePath] = useState(settings?.KoboldCPPBasePath);
+
+  return (
+    <div className="flex gap-4 flex-wrap">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Base URL
+        </label>
+        <input
+          type="url"
+          name="KoboldCPPBasePath"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="http://127.0.0.1:5000/v1"
+          defaultValue={settings?.KoboldCPPBasePath}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+          onChange={(e) => setBasePathValue(e.target.value)}
+          onBlur={() => setBasePath(basePathValue)}
+        />
+      </div>
+      <KoboldCPPModelSelection settings={settings} basePath={basePath} />
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Token context window
+        </label>
+        <input
+          type="number"
+          name="KoboldCPPTokenLimit"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="4096"
+          min={1}
+          onScroll={(e) => e.target.blur()}
+          defaultValue={settings?.KoboldCPPTokenLimit}
+          required={true}
+          autoComplete="off"
+        />
+      </div>
+    </div>
+  );
+}
+
+function KoboldCPPModelSelection({ settings, basePath = null }) {
+  const [customModels, setCustomModels] = useState([]);
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    async function findCustomModels() {
+      if (!basePath || !basePath.includes("/v1")) {
+        setCustomModels([]);
+        setLoading(false);
+        return;
+      }
+      setLoading(true);
+      const { models } = await System.customModels("koboldcpp", null, basePath);
+      setCustomModels(models || []);
+      setLoading(false);
+    }
+    findCustomModels();
+  }, [basePath]);
+
+  if (loading || customModels.length === 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Chat Model Selection
+        </label>
+        <select
+          name="KoboldCPPModelPref"
+          disabled={true}
+          className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            {basePath?.includes("/v1")
+              ? "-- loading available models --"
+              : "-- waiting for URL --"}
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-4">
+        Chat Model Selection
+      </label>
+      <select
+        name="KoboldCPPModelPref"
+        required={true}
+        className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {customModels.map((model) => (
+          <option
+            key={model.id}
+            value={model.id}
+            selected={settings?.KoboldCPPModelPref === model.id}
+          >
+            {model.id}
+          </option>
+        ))}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/media/llmprovider/koboldcpp.png b/frontend/src/media/llmprovider/koboldcpp.png
new file mode 100644
index 0000000000000000000000000000000000000000..5724f04ab3a1bc40ac4e56c1fe9ee61fc67ba3ea
GIT binary patch
literal 7110
[7110 bytes of base85-encoded PNG image data omitted]

diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ ... @@
+import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
@@ ... @@
+import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
@@ ... @@
+  {
+    name: "KoboldCPP",
+    value: "koboldcpp",
+    logo: KoboldCPPLogo,
+    options: (settings) => <KoboldCPPOptions settings={settings} />,
+ description: "Run local LLMs using koboldcpp.",
+ requiredConfig: [
+ "KoboldCPPModelPref",
+ "KoboldCPPBasePath",
+ "KoboldCPPTokenLimit",
+ ],
+ },
{
name: "Cohere",
value: "cohere",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index d0613b8c..6e8a1897 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -15,6 +15,7 @@ import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
import PerplexityLogo from "@/media/llmprovider/perplexity.png";
import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
import GroqLogo from "@/media/llmprovider/groq.png";
+import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
import ZillizLogo from "@/media/vectordbs/zilliz.png";
import AstraDBLogo from "@/media/vectordbs/astraDB.png";
@@ -138,6 +139,13 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: GroqLogo,
},
+ koboldcpp: {
+ name: "KoboldCPP",
+ description: [
+ "Your model and chats are only accessible on the server running KoboldCPP",
+ ],
+ logo: KoboldCPPLogo,
+ },
"generic-openai": {
name: "Generic OpenAI compatible service",
description: [
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 0e73c399..4cf3c221 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -15,6 +15,7 @@ import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
import PerplexityLogo from "@/media/llmprovider/perplexity.png";
import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
import GroqLogo from "@/media/llmprovider/groq.png";
+import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@@ -38,6 +39,7 @@ import System from "@/models/system";
import paths from "@/utils/paths";
import showToast from "@/utils/toast";
import { useNavigate } from "react-router-dom";
+import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
const TITLE = "LLM Preference";
const DESCRIPTION =
@@ -102,6 +104,13 @@ const LLMS = [
     options: (settings) => <LocalAiOptions settings={settings} />,
description: "Run LLMs locally on your own machine.",
},
+ {
+ name: "KoboldCPP",
+ value: "koboldcpp",
+ logo: KoboldCPPLogo,
+    options: (settings) => <KoboldCPPOptions settings={settings} />,
+ description: "Run local LLMs using koboldcpp.",
+ },
{
name: "Together AI",
value: "togetherai",
diff --git a/server/.env.example b/server/.env.example
index e515cc88..c8f05340 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -63,6 +63,11 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
# GROQ_API_KEY=gsk_abcxyz
# GROQ_MODEL_PREF=llama3-8b-8192
+# LLM_PROVIDER='koboldcpp'
+# KOBOLD_CPP_BASE_PATH='http://127.0.0.1:5000/v1'
+# KOBOLD_CPP_MODEL_PREF='koboldcpp/codellama-7b-instruct.Q4_K_S'
+# KOBOLD_CPP_MODEL_TOKEN_LIMIT=4096
+
# LLM_PROVIDER='generic-openai'
# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
# GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index dfbdb882..f7782d26 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -359,6 +359,11 @@ const SystemSettings = {
HuggingFaceLLMAccessToken: !!process.env.HUGGING_FACE_LLM_API_KEY,
HuggingFaceLLMTokenLimit: process.env.HUGGING_FACE_LLM_TOKEN_LIMIT,
+ // KoboldCPP Keys
+ KoboldCPPModelPref: process.env.KOBOLD_CPP_MODEL_PREF,
+ KoboldCPPBasePath: process.env.KOBOLD_CPP_BASE_PATH,
+ KoboldCPPTokenLimit: process.env.KOBOLD_CPP_MODEL_TOKEN_LIMIT,
+
// Generic OpenAI Keys
GenericOpenAiBasePath: process.env.GENERIC_OPEN_AI_BASE_PATH,
GenericOpenAiModelPref: process.env.GENERIC_OPEN_AI_MODEL_PREF,
diff --git a/server/utils/AiProviders/koboldCPP/index.js b/server/utils/AiProviders/koboldCPP/index.js
new file mode 100644
index 00000000..4b1ff3f6
--- /dev/null
+++ b/server/utils/AiProviders/koboldCPP/index.js
@@ -0,0 +1,180 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+ clientAbortedHandler,
+ writeResponseChunk,
+} = require("../../helpers/chat/responses");
+const { v4: uuidv4 } = require("uuid");
+
+class KoboldCPPLLM {
+ constructor(embedder = null, modelPreference = null) {
+ const { OpenAI: OpenAIApi } = require("openai");
+ if (!process.env.KOBOLD_CPP_BASE_PATH)
+ throw new Error(
+ "KoboldCPP must have a valid base path to use for the api."
+ );
+
+ this.basePath = process.env.KOBOLD_CPP_BASE_PATH;
+ this.openai = new OpenAIApi({
+ baseURL: this.basePath,
+ apiKey: null,
+ });
+ this.model = modelPreference ?? process.env.KOBOLD_CPP_MODEL_PREF ?? null;
+ if (!this.model) throw new Error("KoboldCPP must have a valid model set.");
+ this.limits = {
+ history: this.promptWindowLimit() * 0.15,
+ system: this.promptWindowLimit() * 0.15,
+ user: this.promptWindowLimit() * 0.7,
+ };
+
+ if (!embedder)
+ console.warn(
+ "No embedding provider defined for KoboldCPPLLM - falling back to NativeEmbedder for embedding!"
+ );
+ this.embedder = !embedder ? new NativeEmbedder() : embedder;
+ this.defaultTemp = 0.7;
+ this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
+ }
+
+ log(text, ...args) {
+ console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+ }
+
+ #appendContext(contextTexts = []) {
+ if (!contextTexts || !contextTexts.length) return "";
+ return (
+ "\nContext:\n" +
+ contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")
+ );
+ }
+
+ streamingEnabled() {
+ return "streamGetChatCompletion" in this;
+ }
+
+ // Ensure the user set a value for the token limit
+ // and if undefined - assume 4096 window.
+ promptWindowLimit() {
+ const limit = process.env.KOBOLD_CPP_MODEL_TOKEN_LIMIT || 4096;
+ if (!limit || isNaN(Number(limit)))
+ throw new Error("No token context limit was set.");
+ return Number(limit);
+ }
+
+ // Short circuit since we have no idea if the model is valid or not
+ // in pre-flight for generic endpoints
+ isValidChatCompletionModel(_modelName = "") {
+ return true;
+ }
+
+ constructPrompt({
+ systemPrompt = "",
+ contextTexts = [],
+ chatHistory = [],
+ userPrompt = "",
+ }) {
+ const prompt = {
+ role: "system",
+ content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+ };
+ return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+ }
+
+ async isSafe(_input = "") {
+ // Not implemented so must be stubbed
+ return { safe: true, reasons: [] };
+ }
+
+ async getChatCompletion(messages = null, { temperature = 0.7 }) {
+ const result = await this.openai.chat.completions
+ .create({
+ model: this.model,
+ messages,
+ temperature,
+ })
+      .catch((e) => {
+        throw new Error(e.response?.data?.error?.message ?? e.message);
+      });
+
+ if (!result.hasOwnProperty("choices") || result.choices.length === 0)
+ return null;
+ return result.choices[0].message.content;
+ }
+
+ async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+ const streamRequest = await this.openai.chat.completions.create({
+ model: this.model,
+ stream: true,
+ messages,
+ temperature,
+ });
+ return streamRequest;
+ }
+
+ handleStream(response, stream, responseProps) {
+ const { uuid = uuidv4(), sources = [] } = responseProps;
+
+ // Custom handler for KoboldCPP stream responses
+ return new Promise(async (resolve) => {
+ let fullText = "";
+ const handleAbort = () => clientAbortedHandler(resolve, fullText);
+ response.on("close", handleAbort);
+
+ for await (const chunk of stream) {
+ const message = chunk?.choices?.[0];
+ const token = message?.delta?.content;
+
+ if (token) {
+ fullText += token;
+ writeResponseChunk(response, {
+ uuid,
+ sources: [],
+ type: "textResponseChunk",
+ textResponse: token,
+ close: false,
+ error: false,
+ });
+ }
+
+        // KoboldCPP finishes with "length" or "stop"
+        if (
+          message?.finish_reason === "length" ||
+          message?.finish_reason === "stop"
+        ) {
+ writeResponseChunk(response, {
+ uuid,
+ sources,
+ type: "textResponseChunk",
+ textResponse: "",
+ close: true,
+ error: false,
+ });
+ response.removeListener("close", handleAbort);
+ resolve(fullText);
+ }
+ }
+ });
+ }
+
+ // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+ async embedTextInput(textInput) {
+ return await this.embedder.embedTextInput(textInput);
+ }
+ async embedChunks(textChunks = []) {
+ return await this.embedder.embedChunks(textChunks);
+ }
+
+ async compressMessages(promptArgs = {}, rawHistory = []) {
+ const { messageArrayCompressor } = require("../../helpers/chat");
+ const messageArray = this.constructPrompt(promptArgs);
+ return await messageArrayCompressor(this, messageArray, rawHistory);
+ }
+}
+
+module.exports = {
+ KoboldCPPLLM,
+};
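
For reference, a minimal sketch of driving this class directly; the env values and model name are assumptions copied from the .env examples above, not a verified run:

    // Sketch: construct the provider and request one completion.
    const { KoboldCPPLLM } = require("./server/utils/AiProviders/koboldCPP");

    async function demo() {
      process.env.KOBOLD_CPP_BASE_PATH = "http://127.0.0.1:5000/v1";
      process.env.KOBOLD_CPP_MODEL_TOKEN_LIMIT = "4096";

      // No embedder passed, so the class falls back to NativeEmbedder.
      const llm = new KoboldCPPLLM(null, "koboldcpp/codellama-7b-instruct.Q4_K_S");
      const messages = llm.constructPrompt({
        systemPrompt: "You are a concise assistant.",
        contextTexts: [],
        chatHistory: [],
        userPrompt: "Say hello.",
      });
      console.log(await llm.getChatCompletion(messages, { temperature: 0.7 }));
    }

    demo();

In practice the class is reached through getLLMProvider (see server/utils/helpers/index.js below), which supplies the workspace embedder and model preference.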
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index 1bb54170..ce690ae4 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -14,6 +14,7 @@ const SUPPORT_CUSTOM_MODELS = [
"perplexity",
"openrouter",
"lmstudio",
+ "koboldcpp",
];
async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -39,6 +40,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await getOpenRouterModels();
case "lmstudio":
return await getLMStudioModels(basePath);
+ case "koboldcpp":
+ return await getKoboldCPPModels(basePath);
default:
return { models: [], error: "Invalid provider for custom models" };
}
@@ -171,6 +174,28 @@ async function getLMStudioModels(basePath = null) {
}
}
+async function getKoboldCPPModels(basePath = null) {
+ try {
+ const { OpenAI: OpenAIApi } = require("openai");
+ const openai = new OpenAIApi({
+      baseURL: basePath || process.env.KOBOLD_CPP_BASE_PATH,
+ apiKey: null,
+ });
+ const models = await openai.models
+ .list()
+ .then((results) => results.data)
+ .catch((e) => {
+ console.error(`KoboldCPP:listModels`, e.message);
+ return [];
+ });
+
+ return { models, error: null };
+ } catch (e) {
+ console.error(`KoboldCPP:getKoboldCPPModels`, e.message);
+ return { models: [], error: "Could not fetch KoboldCPP Models" };
+ }
+}
+
async function ollamaAIModels(basePath = null) {
let url;
try {
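
Assuming KoboldCPP returns the OpenAI /models schema ({ data: [{ id, object, ... }] }), a caller reaches the new branch through the exported getCustomModels. A sketch, not part of the patch:

    const { getCustomModels } = require("./server/utils/helpers/customModels");

    async function listKoboldModels() {
      const { models, error } = await getCustomModels(
        "koboldcpp",
        null, // KoboldCPP needs no API key
        "http://127.0.0.1:5000/v1"
      );
      if (error) throw new Error(error);
      return models.map((m) => m.id); // e.g. ["koboldcpp/codellama-7b-instruct.Q4_K_S"]
    }
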
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 5d88040d..ba65e3df 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -77,6 +77,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "groq":
const { GroqLLM } = require("../AiProviders/groq");
return new GroqLLM(embedder, model);
+ case "koboldcpp":
+ const { KoboldCPPLLM } = require("../AiProviders/koboldCPP");
+ return new KoboldCPPLLM(embedder, model);
case "cohere":
const { CohereLLM } = require("../AiProviders/cohere");
return new CohereLLM(embedder, model);
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 45f2fd54..19cdfe2b 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -132,6 +132,20 @@ const KEY_MAPPING = {
checks: [nonZero],
},
+ // KoboldCPP Settings
+ KoboldCPPBasePath: {
+ envKey: "KOBOLD_CPP_BASE_PATH",
+ checks: [isNotEmpty, isValidURL],
+ },
+ KoboldCPPModelPref: {
+ envKey: "KOBOLD_CPP_MODEL_PREF",
+ checks: [isNotEmpty],
+ },
+ KoboldCPPTokenLimit: {
+ envKey: "KOBOLD_CPP_MODEL_TOKEN_LIMIT",
+ checks: [nonZero],
+ },
+
  // Generic OpenAI Inference Settings
GenericOpenAiBasePath: {
envKey: "GENERIC_OPEN_AI_BASE_PATH",
@@ -403,6 +417,7 @@ function supportedLLM(input = "") {
"perplexity",
"openrouter",
"groq",
+ "koboldcpp",
"cohere",
"generic-openai",
].includes(input);
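
The checks named in the new KEY_MAPPING entries (isNotEmpty, isValidURL, nonZero) are validators defined elsewhere in updateENV.js. The bodies below are illustrative assumptions about that contract: each check takes the incoming value and returns an error string on failure or null on pass.

    // Hypothetical restatements of the referenced validators.
    function isNotEmpty(input = "") {
      return !input || input.length === 0 ? "Value cannot be empty" : null;
    }

    function isValidURL(input = "") {
      try {
        new URL(input);
        return null;
      } catch {
        return "Not a valid URL";
      }
    }

    function nonZero(input = "") {
      if (isNaN(Number(input)) || Number(input) <= 0)
        return "Value must be greater than zero";
      return null;
    }
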
From 1aa8e5766f6ae7ce216006a8f963244cf7061517 Mon Sep 17 00:00:00 2001
From: timothycarambat
Date: Thu, 2 May 2024 13:05:20 -0700
Subject: [PATCH 2/4] duplicate key (no impact)
---
collector/utils/WhisperProviders/OpenAiWhisper.js | 1 -
1 file changed, 1 deletion(-)
diff --git a/collector/utils/WhisperProviders/OpenAiWhisper.js b/collector/utils/WhisperProviders/OpenAiWhisper.js
index 8460ffea..fc163edd 100644
--- a/collector/utils/WhisperProviders/OpenAiWhisper.js
+++ b/collector/utils/WhisperProviders/OpenAiWhisper.js
@@ -22,7 +22,6 @@ class OpenAiWhisper {
.create({
file: fs.createReadStream(fullFilePath),
model: this.model,
- model: "whisper-1",
response_format: "text",
temperature: this.temperature,
})
From 2d215acb75f13253b3a662a4bad89e62160bdebe Mon Sep 17 00:00:00 2001
From: timothycarambat
Date: Thu, 2 May 2024 14:03:10 -0700
Subject: [PATCH 3/4] patch storage dirs for extensions
---
collector/utils/extensions/Confluence/index.js | 16 +++++++++++-----
collector/utils/extensions/GithubRepo/index.js | 16 +++++++++++-----
.../utils/extensions/YoutubeTranscript/index.js | 16 +++++++++++-----
3 files changed, 33 insertions(+), 15 deletions(-)
diff --git a/collector/utils/extensions/Confluence/index.js b/collector/utils/extensions/Confluence/index.js
index 1ea642e1..5a473f65 100644
--- a/collector/utils/extensions/Confluence/index.js
+++ b/collector/utils/extensions/Confluence/index.js
@@ -66,11 +66,17 @@ async function loadConfluence({ pageUrl, username, accessToken }) {
const outFolder = slugify(
`${subdomain}-confluence-${v4().slice(0, 4)}`
).toLowerCase();
- const outFolderPath = path.resolve(
- __dirname,
- `../../../../server/storage/documents/${outFolder}`
- );
- fs.mkdirSync(outFolderPath);
+
+ const outFolderPath =
+ process.env.NODE_ENV === "development"
+ ? path.resolve(
+ __dirname,
+ `../../../../server/storage/documents/${outFolder}`
+ )
+ : path.resolve(process.env.STORAGE_DIR, `documents/${outFolder}`);
+
+ if (!fs.existsSync(outFolderPath))
+ fs.mkdirSync(outFolderPath, { recursive: true });
docs.forEach((doc) => {
const data = {
diff --git a/collector/utils/extensions/GithubRepo/index.js b/collector/utils/extensions/GithubRepo/index.js
index e5925f1d..a694a8cd 100644
--- a/collector/utils/extensions/GithubRepo/index.js
+++ b/collector/utils/extensions/GithubRepo/index.js
@@ -31,11 +31,17 @@ async function loadGithubRepo(args) {
const outFolder = slugify(
`${repo.author}-${repo.project}-${repo.branch}-${v4().slice(0, 4)}`
).toLowerCase();
- const outFolderPath = path.resolve(
- __dirname,
- `../../../../server/storage/documents/${outFolder}`
- );
- fs.mkdirSync(outFolderPath);
+
+ const outFolderPath =
+ process.env.NODE_ENV === "development"
+ ? path.resolve(
+ __dirname,
+ `../../../../server/storage/documents/${outFolder}`
+ )
+ : path.resolve(process.env.STORAGE_DIR, `documents/${outFolder}`);
+
+ if (!fs.existsSync(outFolderPath))
+ fs.mkdirSync(outFolderPath, { recursive: true });
for (const doc of docs) {
if (!doc.pageContent) continue;
diff --git a/collector/utils/extensions/YoutubeTranscript/index.js b/collector/utils/extensions/YoutubeTranscript/index.js
index 10c08b61..e5fa336b 100644
--- a/collector/utils/extensions/YoutubeTranscript/index.js
+++ b/collector/utils/extensions/YoutubeTranscript/index.js
@@ -67,11 +67,17 @@ async function loadYouTubeTranscript({ url }) {
const outFolder = slugify(
`${metadata.author} YouTube transcripts`
).toLowerCase();
- const outFolderPath = path.resolve(
- __dirname,
- `../../../../server/storage/documents/${outFolder}`
- );
- if (!fs.existsSync(outFolderPath)) fs.mkdirSync(outFolderPath);
+
+ const outFolderPath =
+ process.env.NODE_ENV === "development"
+ ? path.resolve(
+ __dirname,
+ `../../../../server/storage/documents/${outFolder}`
+ )
+ : path.resolve(process.env.STORAGE_DIR, `documents/${outFolder}`);
+
+ if (!fs.existsSync(outFolderPath))
+ fs.mkdirSync(outFolderPath, { recursive: true });
const data = {
id: v4(),
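
All three extensions now repeat the same development-versus-production branch, so a natural follow-up is a shared helper. A hypothetical sketch; the function name and location are illustrative, not part of this patch:

    // collector/utils/files/index.js (hypothetical location; adjust the
    // relative depth to wherever this actually lives)
    const fs = require("fs");
    const path = require("path");

    // Resolve (and create) the server documents folder for an extension output.
    function resolveDocumentsFolder(outFolder) {
      const outFolderPath =
        process.env.NODE_ENV === "development"
          ? path.resolve(
              __dirname,
              `../../../server/storage/documents/${outFolder}`
            )
          : path.resolve(process.env.STORAGE_DIR, `documents/${outFolder}`);
      if (!fs.existsSync(outFolderPath))
        fs.mkdirSync(outFolderPath, { recursive: true });
      return outFolderPath;
    }

    module.exports = { resolveDocumentsFolder };
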
From 0eb16f2c60f13f38d220e6f8e26473b02f164697 Mon Sep 17 00:00:00 2001
From: timothycarambat
Date: Thu, 2 May 2024 15:51:56 -0700
Subject: [PATCH 4/4] fix readme bold lol
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index a56d24ac..d5e4c301 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
-  <b>AnythingLLM: The all-in-one AI app you were looking for.<b>
+  <b>AnythingLLM: The all-in-one AI app you were looking for.</b>
   Chat with your docs, use AI Agents, hyper-configurable, multi-user, & no frustrating setup required.