const { chatPrompt } = require("../../chats");

function togetherAiModels() {
  const { MODELS } = require("./models.js");
  return MODELS || {};
}
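
// Illustrative only: the shape this module assumes for MODELS in ./models.js.
// It is keyed by model ID; only `maxLength` (the context window, read by
// promptWindowLimit below) is required by this file. Values are hypothetical.
//
//   const MODELS = {
//     "mistralai/Mixtral-8x7B-Instruct-v0.1": {
//       name: "Mixtral-8x7B Instruct", // hypothetical
//       maxLength: 32768,              // hypothetical
//     },
//   };
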
class TogetherAiLLM {
  constructor(embedder = null, modelPreference = null) {
    const { Configuration, OpenAIApi } = require("openai");
    if (!process.env.TOGETHER_AI_API_KEY)
      throw new Error("No TogetherAI API key was set.");

    const config = new Configuration({
      basePath: "https://api.together.xyz/v1",
      apiKey: process.env.TOGETHER_AI_API_KEY,
    });
    this.openai = new OpenAIApi(config);
    this.model = modelPreference || process.env.TOGETHER_AI_MODEL_PREF;
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    if (!embedder)
      throw new Error(
        "INVALID TOGETHER AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Together AI as your LLM."
      );
    this.embedder = embedder;
    this.defaultTemp = 0.7;
}
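
  // Example construction (a sketch, not from this file): any embedder object
  // exposing embedTextInput() and embedChunks() satisfies the interface used
  // below; NativeEmbedder is an assumed stand-in. TOGETHER_AI_API_KEY must be
  // set in the environment before construction or the constructor throws.
  //
  //   const llm = new TogetherAiLLM(
  //     new NativeEmbedder(),
  //     "mistralai/Mixtral-8x7B-Instruct-v0.1" // hypothetical model ID
  //   );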
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }
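
  // Worked example: for contextTexts = ["Alpha", "Beta"], #appendContext
  // returns the string:
  //
  //   "\nContext:\n[CONTEXT 0]:\nAlpha\n[END CONTEXT 0]\n\n[CONTEXT 1]:\nBeta\n[END CONTEXT 1]\n\n"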
  allModelInformation() {
    return togetherAiModels();
  }
  streamingEnabled() {
    return "streamChat" in this && "streamGetChatCompletion" in this;
  }
  // Ensure the user set a value for the token limit;
  // if it is undefined, assume a 4096-token window.
  promptWindowLimit() {
    const availableModels = this.allModelInformation();
    return availableModels[this.model]?.maxLength || 4096;
  }
  async isValidChatCompletionModel(model = "") {
    const availableModels = this.allModelInformation();
    return availableModels.hasOwnProperty(model);
  }
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
  }
  async isSafe(_input = "") {
    // Not implemented so must be stubbed
    return { safe: true, reasons: [] };
  }
  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `Together AI chat: ${this.model} is not valid for chat completion!`
      );

    const textResponse = await this.openai
      .createChatCompletion({
        model: this.model,
        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
        n: 1,
        messages: await this.compressMessages(
          {
            systemPrompt: chatPrompt(workspace),
            userPrompt: prompt,
            chatHistory,
          },
          rawHistory
        ),
      })
      .then((json) => {
        const res = json.data;
        if (!res.hasOwnProperty("choices"))
          throw new Error("Together AI chat: No results!");
        if (res.choices.length === 0)
          throw new Error("Together AI chat: No results length!");
        return res.choices[0].message.content;
      })
      .catch((error) => {
        throw new Error(
          `TogetherAI::createChatCompletion failed with: ${error.message}`
        );
      });

    return textResponse;
  }
  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `TogetherAI chat: ${this.model} is not valid for chat completion!`
      );

    const streamRequest = await this.openai.createChatCompletion(
      {
        model: this.model,
        stream: true,
        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
        n: 1,
        messages: await this.compressMessages(
          {
            systemPrompt: chatPrompt(workspace),
            userPrompt: prompt,
            chatHistory,
          },
          rawHistory
        ),
      },
      { responseType: "stream" }
    );
    return { type: "togetherAiStream", stream: streamRequest };
  }
  // Default the options object so calling without a second argument does not
  // throw on destructuring.
  async getChatCompletion(messages = null, { temperature = 0.7 } = {}) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `TogetherAI chat: ${this.model} is not valid for chat completion!`
      );

    const { data } = await this.openai.createChatCompletion({
      model: this.model,
      messages,
      temperature,
    });

    if (!data.hasOwnProperty("choices")) return null;
    return data.choices[0].message.content;
  }
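
  // Usage sketch (values are illustrative, not defaults of this class):
  //
  //   const reply = await llm.getChatCompletion(
  //     [{ role: "user", content: "Hello!" }],
  //     { temperature: 0.5 }
  //   );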
  async streamGetChatCompletion(messages = null, { temperature = 0.7 } = {}) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `TogetherAI chat: ${this.model} is not valid for chat completion!`
      );

    const streamRequest = await this.openai.createChatCompletion(
      {
        model: this.model,
        stream: true,
        messages,
        temperature,
      },
      { responseType: "stream" }
    );
    return { type: "togetherAiStream", stream: streamRequest };
  }
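
  // Consumption sketch: with the openai v3 SDK and responseType "stream",
  // the returned request's .data is a Node readable emitting SSE chunks
  // ("data: {json}" lines, terminated by "data: [DONE]"). This minimal
  // parser is an illustration only and ignores JSON split across chunks:
  //
  //   const { stream } = await llm.streamGetChatCompletion(messages, {});
  //   stream.data.on("data", (chunk) => {
  //     for (const line of chunk.toString().split("\n")) {
  //       if (!line.startsWith("data: ") || line.includes("[DONE]")) continue;
  //       const token = JSON.parse(line.slice(6))?.choices?.[0]?.delta?.content;
  //       if (token) process.stdout.write(token);
  //     }
  //   });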
  // Simple wrappers around the dynamic embedder; they normalize the
  // embedding interface across all LLM implementations.
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
module.exports = {
  TogetherAiLLM,
  togetherAiModels,
};
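
// End-to-end sketch (run inside an async context). The require path and
// SomeEmbedder are placeholders for illustration, not names from this repo:
//
//   process.env.TOGETHER_AI_API_KEY = "tok_...";
//   const { TogetherAiLLM } = require("./togetherAi");
//   const llm = new TogetherAiLLM(new SomeEmbedder());
//   const answer = await llm.getChatCompletion(
//     [{ role: "user", content: "Say hi" }],
//     { temperature: 0.7 }
//   );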