const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { chatPrompt } = require("../../chats");
const {
  handleDefaultStreamResponse,
} = require("../../helpers/chat/responses");

// Groq exposes an OpenAI-compatible API, so this provider reuses the OpenAI
// SDK client pointed at Groq's base path.
class GroqLLM {
  constructor(embedder = null, modelPreference = null) {
    const { Configuration, OpenAIApi } = require("openai");
    if (!process.env.GROQ_API_KEY) throw new Error("No Groq API key was set.");

    const config = new Configuration({
      basePath: "https://api.groq.com/openai/v1",
      apiKey: process.env.GROQ_API_KEY,
    });
    this.openai = new OpenAIApi(config);
    this.model =
      modelPreference || process.env.GROQ_MODEL_PREF || "llama2-70b-4096";

    // Token budget split used when compressing messages to fit the context window.
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = !embedder ? new NativeEmbedder() : embedder;
    this.defaultTemp = 0.7;
  }

  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  streamingEnabled() {
    return "streamChat" in this && "streamGetChatCompletion" in this;
  }

  // Context window sizes are encoded in the Groq model names
  // (e.g. mixtral-8x7b-32768 => 32,768 tokens).
  promptWindowLimit() {
    switch (this.model) {
      case "llama2-70b-4096":
        return 4096;
      case "mixtral-8x7b-32768":
        return 32_768;
      case "llama3-8b-8192":
        return 8192;
      case "llama3-70b-8192":
        return 8192;
      case "gemma-7b-it":
        return 8192;
      default:
        return 4096;
    }
  }

  // Accept known presets immediately; otherwise ask the API whether the model exists.
  async isValidChatCompletionModel(modelName = "") {
    const validModels = [
      "llama2-70b-4096",
      "mixtral-8x7b-32768",
      "llama3-8b-8192",
      "llama3-70b-8192",
      "gemma-7b-it",
    ];
    const isPreset = validModels.some((model) => modelName === model);
    if (isPreset) return true;

    const model = await this.openai
      .retrieveModel(modelName)
      .then((res) => res.data)
      .catch(() => null);
    return !!model;
  }

  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
  }

  async isSafe(_input = "") {
    // Not implemented so must be stubbed
    return { safe: true, reasons: [] };
  }

  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `Groq chat: ${this.model} is not valid for chat completion!`
      );

    const textResponse = await this.openai
      .createChatCompletion({
        model: this.model,
        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
        n: 1,
        messages: await this.compressMessages(
          {
            systemPrompt: chatPrompt(workspace),
            userPrompt: prompt,
            chatHistory,
          },
          rawHistory
        ),
      })
      .then((json) => {
        const res = json.data;
        if (!res.hasOwnProperty("choices"))
          throw new Error("GroqAI chat: No results!");
        if (res.choices.length === 0)
          throw new Error("GroqAI chat: No results length!");
        return res.choices[0].message.content;
      })
      .catch((error) => {
        throw new Error(
          `GroqAI::createChatCompletion failed with: ${error.message}`
        );
      });

    return textResponse;
  }

  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `GroqAI:streamChat: ${this.model} is not valid for chat completion!`
      );

    const streamRequest = await this.openai.createChatCompletion(
      {
        model: this.model,
        stream: true,
        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
        n: 1,
        messages: await this.compressMessages(
          {
            systemPrompt: chatPrompt(workspace),
            userPrompt: prompt,
            chatHistory,
          },
          rawHistory
        ),
      },
      { responseType: "stream" }
    );
    return streamRequest;
  }

  async getChatCompletion(messages = null, { temperature = 0.7 } = {}) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `GroqAI:chatCompletion: ${this.model} is not valid for chat completion!`
      );

    const { data } = await this.openai
      .createChatCompletion({
        model: this.model,
        messages,
        temperature,
      })
      .catch((e) => {
        throw new Error(e.response?.data?.error?.message ?? e.message);
      });

    if (!data.hasOwnProperty("choices")) return null;
    return data.choices[0].message.content;
  }

  async streamGetChatCompletion(messages = null, { temperature = 0.7 } = {}) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `GroqAI:streamChatCompletion: ${this.model} is not valid for chat completion!`
      );

    const streamRequest = await this.openai.createChatCompletion(
      {
        model: this.model,
        stream: true,
        messages,
        temperature,
      },
      { responseType: "stream" }
    );
    return streamRequest;
  }

  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponse(response, stream, responseProps);
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }

  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}

module.exports = {
  GroqLLM,
};
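// Usage sketch (illustrative only; the require path and model choice here are
// assumptions, not part of this module). GROQ_API_KEY must be set in the
// environment before construction, and `getChatCompletion` takes an
// OpenAI-style message array plus an options object:
//
//   const { GroqLLM } = require("./index"); // path assumed
//
//   async function example() {
//     const llm = new GroqLLM(null, "llama3-8b-8192");
//     const reply = await llm.getChatCompletion(
//       [
//         { role: "system", content: "You are a helpful assistant." },
//         { role: "user", content: "Say hello in one sentence." },
//       ],
//       { temperature: 0.7 }
//     );
//     console.log(reply);
//   }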