mirror of
https://github.com/Mintplex-Labs/anything-llm.git
synced 2024-11-19 20:50:09 +01:00
2a556c275c
* 1. Define LLM Temperature as a workspace setting 2. Implement rudimentary table migration code for both new and existing repos to bring tables up to date 3. Trigger for workspace on update to update timestamp 4. Always fall back temp to 0.7 5. Extract WorkspaceModal into tabbed content 6. Remove workspace name UNIQUE constraint (cannot be migrated :() 7. Add slug + seed when existing slug is already taken 8. Separate name from slug so display names can be changed * remove blocking test return
77 lines
2.1 KiB
JavaScript
77 lines
2.1 KiB
JavaScript
const { Configuration, OpenAIApi } = require("openai");
|
|
class OpenAi {
  /**
   * Thin wrapper around the OpenAI v3 SDK client.
   * Reads the API key from process.env.OPEN_AI_KEY.
   */
  constructor() {
    const config = new Configuration({
      apiKey: process.env.OPEN_AI_KEY,
      // organization: "org-123xyz", // Optional
    });
    const openai = new OpenAIApi(config);
    this.openai = openai;
  }

  /**
   * Whether the given model name is supported for chat completions.
   * @param {string} [modelName=""]
   * @returns {boolean}
   */
  isValidChatModel(modelName = "") {
    const validModels = ["gpt-4", "gpt-3.5-turbo"];
    return validModels.includes(modelName);
  }

  /**
   * Runs the input through the OpenAI moderation endpoint.
   * @param {string} [input=""] - Text to check.
   * @returns {Promise<{safe: boolean, reasons: string[]}>} `reasons` lists the
   *   flagged category names (with "/" replaced by " or ") when unsafe.
   * @throws {Error} When the moderation response carries no results.
   */
  async isSafe(input = "") {
    const { flagged = false, categories = {} } = await this.openai
      .createModeration({ input })
      .then((json) => {
        const res = json.data;
        if (!res.hasOwnProperty("results"))
          throw new Error("OpenAI moderation: No results!");
        if (res.results.length === 0)
          throw new Error("OpenAI moderation: No results length!");
        return res.results[0];
      });

    if (!flagged) return { safe: true, reasons: [] };

    // Collect the human-readable names of every category that was flagged.
    const reasons = Object.keys(categories)
      .map((category) => {
        const value = categories[category];
        if (value === true) {
          return category.replace("/", " or ");
        } else {
          return null;
        }
      })
      .filter((reason) => !!reason);

    return { safe: false, reasons };
  }

  /**
   * Sends a chat completion request using the model from OPEN_MODEL_PREF.
   * @param {Array<{role: string, content: string}>} [chatHistory=[]] - Prior turns.
   * @param {string} prompt - The new user message.
   * @param {object} [workspace={}] - May carry an `openAiTemp` temperature override.
   * @returns {Promise<string>} The assistant's reply text.
   * @throws {Error} When the configured model is not chat-capable or the API
   *   returns no choices.
   */
  async sendChat(chatHistory = [], prompt, workspace = {}) {
    const model = process.env.OPEN_MODEL_PREF;
    if (!this.isValidChatModel(model))
      throw new Error(
        `OpenAI chat: ${model} is not valid for chat completion!`
      );

    // FIX: a non-numeric workspace.openAiTemp previously coerced to NaN,
    // which the API rejects — fall back to the 0.7 default in that case.
    const requestedTemp = Number(workspace?.openAiTemp ?? 0.7);
    const temperature = Number.isFinite(requestedTemp) ? requestedTemp : 0.7;

    const textResponse = await this.openai
      .createChatCompletion({
        model,
        temperature,
        n: 1,
        messages: [
          { role: "system", content: "" },
          ...chatHistory,
          { role: "user", content: prompt },
        ],
      })
      .then((json) => {
        const res = json.data;
        if (!res.hasOwnProperty("choices"))
          throw new Error("OpenAI chat: No results!");
        if (res.choices.length === 0)
          throw new Error("OpenAI chat: No results length!");
        return res.choices[0].message.content;
      });

    return textResponse;
  }
}
|
|
|
|
// Expose the OpenAI wrapper for consumers of this module.
module.exports = { OpenAi };
|