// anything-llm/server/utils/AiProviders/openAi/index.js

const { toChunks } = require("../../helpers");
class OpenAi {
  constructor() {
    const { Configuration, OpenAIApi } = require("openai");
    const config = new Configuration({
      apiKey: process.env.OPEN_AI_KEY,
    });
    const openai = new OpenAIApi(config);
    this.openai = openai;
    // Arbitrary limit to ensure we stay within a reasonable POST request size.
    this.embeddingChunkLimit = 1_000;
  }

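  // Scale note (an illustrative assumption, not from the original file): at
  // this limit a single request carries at most 1,000 input strings; at ~1 KB
  // of text apiece that is roughly 1 MB per POST, comfortably below the ~8 MB
  // request cap noted in embedChunks below.
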
  isValidChatModel(modelName = "") {
    const validModels = ["gpt-4", "gpt-3.5-turbo"];
    return validModels.includes(modelName);
  }

  async isSafe(input = "") {
    const { flagged = false, categories = {} } = await this.openai
      .createModeration({ input })
      .then((json) => {
        const res = json.data;
        if (!res.hasOwnProperty("results"))
          throw new Error("OpenAI moderation: No results!");
        if (res.results.length === 0)
          throw new Error("OpenAI moderation: Results array is empty!");
        return res.results[0];
      })
      .catch((error) => {
        throw new Error(
          `OpenAI::CreateModeration failed with: ${error.message}`
        );
      });

    if (!flagged) return { safe: true, reasons: [] };

    const reasons = Object.keys(categories)
      .map((category) => {
        const value = categories[category];
        if (value === true) {
          return category.replace("/", " or ");
        } else {
          return null;
        }
      })
      .filter((reason) => !!reason);

    return { safe: false, reasons };
  }

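  // Return-shape sketch (illustrative; not part of the original file). Given
  // an instance `openAi = new OpenAi()`:
  //   await openAi.isSafe("friendly text");  // -> { safe: true, reasons: [] }
  //   // A flagged input yields the offending categories, e.g.
  //   // { safe: false, reasons: ["hate or threatening"] } via the
  //   // "/"-to-" or " rewrite above.
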
  async sendChat(chatHistory = [], prompt, workspace = {}) {
    const model = process.env.OPEN_MODEL_PREF;
    if (!this.isValidChatModel(model))
      throw new Error(
        `OpenAI chat: ${model} is not valid for chat completion!`
      );

    const textResponse = await this.openai
      .createChatCompletion({
        model,
        temperature: Number(workspace?.openAiTemp ?? 0.7),
        n: 1,
        messages: [
          { role: "system", content: "" },
          ...chatHistory,
          { role: "user", content: prompt },
        ],
      })
      .then((json) => {
        const res = json.data;
        if (!res.hasOwnProperty("choices"))
          throw new Error("OpenAI chat: No results!");
        if (res.choices.length === 0)
          throw new Error("OpenAI chat: Choices array is empty!");
        return res.choices[0].message.content;
      })
      .catch((error) => {
        console.log(error);
        throw new Error(
          `OpenAI::createChatCompletion failed with: ${error.message}`
        );
      });

    return textResponse;
  }

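  // Usage sketch (illustrative; assumes OPEN_MODEL_PREF names a valid chat
  // model and that the hypothetical `workspace` object exposes openAiTemp, as
  // read above):
  //   const reply = await openAi.sendChat(
  //     [{ role: "user", content: "Hi" }, { role: "assistant", content: "Hello!" }],
  //     "What can this workspace do?",
  //     { openAiTemp: 0.5 }
  //   );
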
  async getChatCompletion(messages = [], { temperature = 0.7 } = {}) {
    const model = process.env.OPEN_MODEL_PREF || "gpt-3.5-turbo";
    const { data } = await this.openai.createChatCompletion({
      model,
      messages,
      temperature,
    });
    if (!data.hasOwnProperty("choices")) return null;
    return data.choices[0].message.content;
  }

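  // Note: unlike sendChat above, getChatCompletion performs no model
  // validation and no moderation; callers are trusted. Minimal call sketch
  // (illustrative):
  //   const text = await openAi.getChatCompletion(
  //     [{ role: "user", content: "Summarize this document." }],
  //     { temperature: 0.2 }
  //   );
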
  async embedTextInput(textInput) {
    const result = await this.embedChunks(textInput);
    return result?.[0] || [];
  }

  async embedChunks(textChunks = []) {
    // Because OpenAI enforces a hard POST-size limit (~8 MB) on how many
    // chunks can be sent at once, we split the input into maximally sized
    // batches and execute them concurrently.
    // Refer to embeddingChunkLimit in the constructor for more info.
    const embeddingRequests = [];
    for (const chunk of toChunks(textChunks, this.embeddingChunkLimit)) {
      embeddingRequests.push(
        new Promise((resolve) => {
          this.openai
            .createEmbedding({
              model: "text-embedding-ada-002",
              input: chunk,
            })
            .then((res) => {
              resolve({ data: res.data?.data, error: null });
            })
            .catch((e) => {
              resolve({ data: [], error: e?.error });
            });
        })
      );
    }

    const { data = [], error = null } = await Promise.all(
      embeddingRequests
    ).then((results) => {
      // If any errors were returned from OpenAI, abort the entire sequence
      // because the embeddings will be incomplete.
      const errors = results
        .filter((res) => !!res.error)
        .map((res) => res.error)
        .flat();
      if (errors.length > 0) {
        return {
          data: [],
          error: `(${errors.length}) Embedding Errors! ${errors
            .map((error) => `[${error.type}]: ${error.message}`)
            .join(", ")}`,
        };
      }
      return {
        data: results.map((res) => res?.data || []).flat(),
        error: null,
      };
    });

    if (!!error) throw new Error(`OpenAI failed to embed: ${error}`);
    return data.length > 0 &&
      data.every((embd) => embd.hasOwnProperty("embedding"))
      ? data.map((embd) => embd.embedding)
      : null;
  }
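
  // Shape sketch (illustrative): embedChunks maps an array of strings to an
  // array of embedding vectors, or null when any item lacks an "embedding" key:
  //   const vectors = await openAi.embedChunks(["first text", "second text"]);
  //   // vectors?.[0] -> [0.0123, -0.0456, ...] (1,536 floats for ada-002)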
}
module.exports = {
  OpenAi,
};
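
// Minimal end-to-end sketch (illustrative; assumes the environment variables
// read above are set, e.g. OPEN_AI_KEY=sk-... and OPEN_MODEL_PREF=gpt-3.5-turbo):
//   const { OpenAi } = require("./index");
//   const openAi = new OpenAi();
//   const { safe, reasons } = await openAi.isSafe(userInput);
//   if (!safe) throw new Error(`Moderation flagged: ${reasons.join(", ")}`);
//   const reply = await openAi.sendChat(chatHistory, userInput, workspace);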