mirror of
https://github.com/Mintplex-Labs/anything-llm.git
synced 2024-11-11 09:10:13 +01:00
91 lines
2.3 KiB
JavaScript
91 lines
2.3 KiB
JavaScript
|
const OpenAI = require("openai");
|
||
|
const Provider = require("./ai-provider.js");
|
||
|
const InheritMultiple = require("./helpers/classes.js");
|
||
|
const UnTooled = require("./helpers/untooled.js");
|
||
|
|
||
|
/**
|
||
|
* The provider for the LMStudio provider.
|
||
|
*/
|
||
|
/**
 * The provider for the LMStudio provider.
 *
 * Talks to a local LMStudio instance through its OpenAI-compatible API.
 * Inherits generic provider plumbing from Provider and the manual
 * tool-calling fallback helpers (functionCall, cleanMsgs, deduplicator,
 * providerLog) from UnTooled.
 */
class LMStudioProvider extends InheritMultiple([Provider, UnTooled]) {
  model;

  constructor(_config = {}) {
    super();
    // LMStudio serves whatever model is loaded in its UI; the pref is advisory.
    const model = process.env.LMSTUDIO_MODEL_PREF || "Loaded from Chat UI";
    const client = new OpenAI({
      baseURL: process.env.LMSTUDIO_BASE_PATH?.replace(/\/+$/, ""), // here is the URL to your LMStudio instance
      apiKey: null, // LMStudio does not require an API key.
      maxRetries: 3,
      model,
    });
    this._client = client;
    this.model = model;
    this.verbose = true;
  }

  get client() {
    return this._client;
  }

  /**
   * Create a completion based on the received messages.
   *
   * @param messages A list of messages to send to the API.
   * @param functions Optional list of tool definitions; may be null/omitted.
   * @returns The completion as `{ result, cost }`, or
   *          `{ result: null, functionCall, cost }` when a valid tool call
   *          was detected.
   */
  async complete(messages, functions = null) {
    let completion;

    // BUGFIX: the previous code called `functions.length` unconditionally,
    // which threw a TypeError whenever `functions` was null (its default).
    if (Array.isArray(functions) && functions.length > 0) {
      const { toolCall, text } = await this.functionCall(messages, functions);

      if (toolCall !== null) {
        this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
        this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
        return {
          result: null,
          functionCall: {
            name: toolCall.name,
            arguments: toolCall.arguments,
          },
          cost: 0,
        };
      }
      completion = { content: text };
    }

    // No tool call produced usable content — fall back to a plain chat
    // completion against the LMStudio instance.
    if (!completion?.content) {
      this.providerLog(
        "Will assume chat completion without tool call inputs."
      );
      const response = await this.client.chat.completions.create({
        model: this.model,
        messages: this.cleanMsgs(messages),
      });
      completion = response.choices[0].message;
    }

    return {
      result: completion.content,
      cost: 0,
    };
  }

  /**
   * Get the cost of the completion.
   *
   * @param _usage The completion to get the cost for.
   * @returns The cost of the completion.
   * Stubbed since LMStudio has no cost basis.
   */
  getCost(_usage) {
    return 0;
  }
}
|
||
|
|
||
|
// CommonJS export: consumed by the agent provider registry.
module.exports = LMStudioProvider;
|