const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");

class HuggingFaceLLM {
  constructor(embedder = null, _modelPreference = null) {
    if (!process.env.HUGGING_FACE_LLM_ENDPOINT)
      throw new Error("No HuggingFace Inference Endpoint was set.");
    if (!process.env.HUGGING_FACE_LLM_API_KEY)
      throw new Error("No HuggingFace Access Token was set.");

    const { OpenAI: OpenAIApi } = require("openai");
    this.openai = new OpenAIApi({
      baseURL: `${process.env.HUGGING_FACE_LLM_ENDPOINT}/v1`,
      apiKey: process.env.HUGGING_FACE_LLM_API_KEY,
    });
    // When using an HF inference server, the model param is not required, so
    // we can stub it here. HF Endpoints can only run one model at a time.
    // We set it to 'tgi' so the HF endpoint can accept the chat message format.
    this.model = "tgi";
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.2;
  }

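  // Flatten retrieved context snippets into a single labeled block that is
  // appended to the first user message's content.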
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

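  // Streaming is supported as long as a streamGetChatCompletion handler exists on this class.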
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

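  // The context window is user-defined via HUGGING_FACE_LLM_TOKEN_LIMIT and falls back to 4096 tokens.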
  static promptWindowLimit(_modelName) {
    const limit = process.env.HUGGING_FACE_LLM_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No HuggingFace token context limit was set.");
    return Number(limit);
  }

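  // Instance variant of the limit for callers that hold a constructed provider.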
  promptWindowLimit() {
    const limit = process.env.HUGGING_FACE_LLM_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No HuggingFace token context limit was set.");
    return Number(limit);
  }

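  // HF Inference Endpoints host a single model, so any model name is accepted as valid.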
  async isValidChatCompletionModel(_ = "") {
    return true;
  }

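  // A dedicated system role is not supported here, so the system prompt and context
  // are sent as an initial user message followed by a stubbed assistant acknowledgement,
  // then the real chat history and the latest user prompt.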
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
  }) {
    // A system prompt is not enabled for HF model chats
    const prompt = {
      role: "user",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    const assistantResponse = {
      role: "assistant",
      content: "Okay, I will follow those instructions",
    };
    return [
      prompt,
      assistantResponse,
      ...chatHistory,
      { role: "user", content: userPrompt },
    ];
  }

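  // Non-streaming completion against the OpenAI-compatible /v1 route of the HF endpoint.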
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    const result = await this.openai.chat.completions.create({
      model: this.model,
      messages,
      temperature,
    });

    if (!result.hasOwnProperty("choices") || result.choices.length === 0)
      return null;
    return result.choices[0].message.content;
  }

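  // Same request as above, but with stream: true; the raw stream is returned for handleStream to consume.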
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    const streamRequest = await this.openai.chat.completions.create({
      model: this.model,
      stream: true,
      messages,
      temperature,
    });
    return streamRequest;
  }

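  // Delegate stream consumption and chunk handling to the shared default response handler.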
  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

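  // Assemble the prompt and compress it to fit the configured token limits before sending.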
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}

module.exports = {
  HuggingFaceLLM,
};
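
// Rough usage sketch (hypothetical caller, not part of this module). Assumes
// HUGGING_FACE_LLM_ENDPOINT and HUGGING_FACE_LLM_API_KEY point at a running
// OpenAI-compatible HF Inference Endpoint:
//
//   const llm = new HuggingFaceLLM();
//   const messages = llm.constructPrompt({
//     systemPrompt: "You are a helpful assistant.",
//     contextTexts: [],
//     chatHistory: [],
//     userPrompt: "Hello!",
//   });
//   llm
//     .getChatCompletion(messages, { temperature: llm.defaultTemp })
//     .then((reply) => console.log(reply));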