[FEAT] Claude 3 support and implement new version of Anthropic SDK (#863)

* implement the new version of the Anthropic SDK and support the new Claude models (see the API sketch below)

* remove handleAnthropicStream and move that logic into handleStream inside the Anthropic provider

* update useGetProvidersModels with the new Anthropic models
Sean Hatfield, 2024-03-06 14:57:47 -08:00, committed by GitHub
parent 0634013788
commit e0d5d8039a
6 changed files with 146 additions and 82 deletions
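
For context: the heart of this change is the move from the SDK's legacy completions API, which took a single prompt string with "\n\nHuman:" and "\n\nAssistant:" markers, to the Messages API, where the system prompt is a dedicated parameter and the conversation is an array of role/content turns. A minimal sketch of the call shape the diffs below rely on (the model name and prompt text here are illustrative only):

const Anthropic = require("@anthropic-ai/sdk");

const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

async function example() {
  // The system prompt is now a top-level parameter; messages hold only
  // the user/assistant turns.
  const response = await anthropic.messages.create({
    model: "claude-3-opus-20240229",
    max_tokens: 4096,
    system: "You are a helpful assistant.",
    messages: [{ role: "user", content: "Hello, Claude!" }],
  });
  return response.content[0].text; // text of the first content block
}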

File: frontend AnthropicAiOptions component

@@ -48,7 +48,13 @@ export default function AnthropicAiOptions({ settings, showAlert = false }) {
           required={true}
           className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
         >
-          {["claude-2", "claude-instant-1"].map((model) => {
+          {[
+            "claude-instant-1.2",
+            "claude-2.0",
+            "claude-2.1",
+            "claude-3-opus-20240229",
+            "claude-3-sonnet-20240229",
+          ].map((model) => {
             return (
               <option key={model} value={model}>
                 {model}
File: frontend useGetProvidersModels hook

@@ -13,7 +13,13 @@ const PROVIDER_DEFAULT_MODELS = {
     "gpt-4-32k",
   ],
   gemini: ["gemini-pro"],
-  anthropic: ["claude-2", "claude-instant-1"],
+  anthropic: [
+    "claude-instant-1.2",
+    "claude-2.0",
+    "claude-2.1",
+    "claude-3-opus-20240229",
+    "claude-3-sonnet-20240229",
+  ],
   azure: [],
   lmstudio: [],
   localai: [],

File: server package.json

@@ -20,7 +20,7 @@
     "seed": "node prisma/seed.js"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.8.1",
+    "@anthropic-ai/sdk": "^0.16.1",
     "@azure/openai": "1.0.0-beta.10",
     "@datastax/astra-db-ts": "^0.1.3",
     "@google/generative-ai": "^0.1.3",

File: server Anthropic provider (AnthropicLLM)

@@ -1,6 +1,6 @@
 const { v4 } = require("uuid");
 const { chatPrompt } = require("../../chats");
-
+const { writeResponseChunk } = require("../../helpers/chat/responses");
 class AnthropicLLM {
   constructor(embedder = null, modelPreference = null) {
     if (!process.env.ANTHROPIC_API_KEY)
@@ -13,7 +13,7 @@ class AnthropicLLM {
     });
     this.anthropic = anthropic;
     this.model =
-      modelPreference || process.env.ANTHROPIC_MODEL_PREF || "claude-2";
+      modelPreference || process.env.ANTHROPIC_MODEL_PREF || "claude-2.0";
     this.limits = {
       history: this.promptWindowLimit() * 0.15,
       system: this.promptWindowLimit() * 0.15,
@@ -35,17 +35,29 @@ class AnthropicLLM {
   promptWindowLimit() {
     switch (this.model) {
-      case "claude-instant-1":
-        return 72_000;
-      case "claude-2":
+      case "claude-instant-1.2":
+        return 100_000;
+      case "claude-2.0":
         return 100_000;
+      case "claude-2.1":
+        return 200_000;
+      case "claude-3-opus-20240229":
+        return 200_000;
+      case "claude-3-sonnet-20240229":
+        return 200_000;
       default:
-        return 72_000; // assume a claude-instant-1 model
+        return 100_000; // assume a claude-instant-1.2 model
     }
   }
 
   isValidChatCompletionModel(modelName = "") {
-    const validModels = ["claude-2", "claude-instant-1"];
+    const validModels = [
+      "claude-instant-1.2",
+      "claude-2.0",
+      "claude-2.1",
+      "claude-3-opus-20240229",
+      "claude-3-sonnet-20240229",
+    ];
     return validModels.includes(modelName);
   }
@@ -62,36 +74,43 @@ class AnthropicLLM {
     chatHistory = [],
     userPrompt = "",
   }) {
-    return `\n\nHuman: Please read question supplied within the <question> tags. Using all information generate an answer to the question and output it within <${
-      this.answerKey
-    }> tags. Previous conversations can be used within the <history> tags and can be used to influence the output. Content between the <system> tag is additional information and instruction that will impact how answers are formatted or responded to. Additional contextual information retrieved to help answer the users specific query is available to use for answering and can be found between <context> tags. When no <context> tags may are present use the knowledge available and in the conversation to answer. When one or more <context> tags are available you will use those to help answer the question or augment pre-existing knowledge. You should never say "Based on the provided context" or other phrasing that is not related to the user question.
-    <system>${systemPrompt}</system>
-    ${contextTexts
-      .map((text, i) => {
-        return `<context>${text}</context>\n`;
-      })
-      .join("")}
-    <history>${chatHistory.map((history) => {
-      switch (history.role) {
-        case "assistant":
-          return `\n\nAssistant: ${history.content}`;
-        case "user":
-          return `\n\nHuman: ${history.content}`;
-        default:
-          return "\n";
-      }
-    })}</history>
-    <question>${userPrompt}</question>
-    \n\nAssistant:`;
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+    };
+    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+  async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!this.isValidChatCompletionModel(this.model))
       throw new Error(
         `Anthropic chat: ${this.model} is not valid for chat completion!`
       );
 
-    const compressedPrompt = await this.compressMessages(
+    try {
+      const response = await this.anthropic.messages.create({
+        model: this.model,
+        max_tokens: 4096,
+        system: messages[0].content, // Strip out the system message
+        messages: messages.slice(1), // Pop off the system message
+        temperature: Number(temperature ?? this.defaultTemp),
+      });
+      return response.content[0].text;
+    } catch (error) {
+      console.log(error);
+      return error;
+    }
+  }
+
+  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+    if (!this.isValidChatCompletionModel(this.model))
+      throw new Error(
+        `Anthropic chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const messages = await this.compressMessages(
       {
         systemPrompt: chatPrompt(workspace),
         userPrompt: prompt,
@@ -99,58 +118,85 @@ class AnthropicLLM {
       },
       rawHistory
     );
-    const { content, error } = await this.anthropic.completions
-      .create({
-        model: this.model,
-        max_tokens_to_sample: 300,
-        prompt: compressedPrompt,
-      })
-      .then((res) => {
-        const { completion } = res;
-        const re = new RegExp(
-          "(?:<" + this.answerKey + ">)([\\s\\S]*)(?:</" + this.answerKey + ">)"
-        );
-        const response = completion.match(re)?.[1]?.trim();
-        if (!response)
-          throw new Error("Anthropic: No response could be parsed.");
-        return { content: response, error: null };
-      })
-      .catch((e) => {
-        return { content: null, error: e.message };
-      });
-
-    if (error) throw new Error(error);
-    return content;
+    const streamRequest = await this.anthropic.messages.stream({
+      model: this.model,
+      max_tokens: 4096,
+      system: messages[0].content, // Strip out the system message
+      messages: messages.slice(1), // Pop off the system message
+      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
+    });
+    return streamRequest;
   }
 
-  async getChatCompletion(prompt = "", _opts = {}) {
+  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!this.isValidChatCompletionModel(this.model))
       throw new Error(
-        `Anthropic chat: ${this.model} is not valid for chat completion!`
+        `OpenAI chat: ${this.model} is not valid for chat completion!`
       );
 
-    const { content, error } = await this.anthropic.completions
-      .create({
-        model: this.model,
-        max_tokens_to_sample: 300,
-        prompt,
-      })
-      .then((res) => {
-        const { completion } = res;
-        const re = new RegExp(
-          "(?:<" + this.answerKey + ">)([\\s\\S]*)(?:</" + this.answerKey + ">)"
-        );
-        const response = completion.match(re)?.[1]?.trim();
-        if (!response)
-          throw new Error("Anthropic: No response could be parsed.");
-        return { content: response, error: null };
-      })
-      .catch((e) => {
-        return { content: null, error: e.message };
-      });
+    const streamRequest = await this.anthropic.messages.stream({
+      model: this.model,
+      max_tokens: 4096,
+      system: messages[0].content, // Strip out the system message
+      messages: messages.slice(1), // Pop off the system message
+      temperature: Number(temperature ?? this.defaultTemp),
+    });
+    return streamRequest;
   }
 
-    if (error) throw new Error(error);
-    return content;
+  handleStream(response, stream, responseProps) {
+    return new Promise((resolve) => {
+      let fullText = "";
+      const { uuid = v4(), sources = [] } = responseProps;
+
+      stream.on("streamEvent", (message) => {
+        const data = message;
+        if (
+          data.type === "content_block_delta" &&
+          data.delta.type === "text_delta"
+        ) {
+          const text = data.delta.text;
+          fullText += text;
+          writeResponseChunk(response, {
+            uuid,
+            sources,
+            type: "textResponseChunk",
+            textResponse: text,
+            close: false,
+            error: false,
+          });
+        }
+
+        if (
+          message.type === "message_stop" ||
+          (data.stop_reason && data.stop_reason === "end_turn")
+        ) {
+          writeResponseChunk(response, {
+            uuid,
+            sources,
+            type: "textResponseChunk",
+            textResponse: "",
+            close: true,
+            error: false,
+          });
+          resolve(fullText);
+        }
+      });
+    });
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
 
   async compressMessages(promptArgs = {}, rawHistory = []) {
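
Putting the new provider methods together, a rough usage sketch (the calling code below is illustrative, not part of this commit; response is assumed to be an Express-style response already set up for event streaming):

async function chatExample(llm, response) {
  // compressMessages funnels through constructPrompt, yielding
  // [system, ...history, user] in the new message-array format.
  const messages = await llm.compressMessages(
    {
      systemPrompt: "You are a helpful assistant.",
      userPrompt: "Hi there!",
      chatHistory: [],
    },
    []
  );

  // Blocking completion: resolves to the assistant's text.
  const text = await llm.getChatCompletion(messages, { temperature: 0.7 });

  // Streaming: forward SDK stream events to the client; handleStream
  // resolves with the full accumulated text on message_stop / end_turn.
  const stream = await llm.streamGetChatCompletion(messages, {
    temperature: 0.7,
  });
  const fullText = await llm.handleStream(response, stream, { sources: [] });
  return { text, fullText };
}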

File: server ENV update validators (validAnthropicModel)

@@ -359,7 +359,13 @@ function validGeminiModel(input = "") {
 }
 
 function validAnthropicModel(input = "") {
-  const validModels = ["claude-2", "claude-instant-1"];
+  const validModels = [
+    "claude-instant-1.2",
+    "claude-2.0",
+    "claude-2.1",
+    "claude-3-opus-20240229",
+    "claude-3-sonnet-20240229",
+  ];
   return validModels.includes(input)
     ? null
     : `Invalid Model type. Must be one of ${validModels.join(", ")}.`;
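
For reference, the validator returns null when the model is accepted and an error string otherwise, so callers treat any truthy result as a failure; a small illustrative check:

const error = validAnthropicModel("claude-3-opus-20240229"); // null, accepted
if (error) throw new Error(error); // would fire for "claude-2" after this change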

File: server yarn.lock

@@ -7,10 +7,10 @@
   resolved "https://registry.yarnpkg.com/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz#bd9154aec9983f77b3a034ecaa015c2e4201f6cf"
   integrity sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==
 
-"@anthropic-ai/sdk@^0.8.1":
-  version "0.8.1"
-  resolved "https://registry.yarnpkg.com/@anthropic-ai/sdk/-/sdk-0.8.1.tgz#7c7c6cb262abe3e6d0bb8bd1179b4589edd7a6ad"
-  integrity sha512-59etePenCizVx1O8Qhi1T1ruE04ISfNzCnyhZNcsss1QljsLmYS83jttarMNEvGYcsUF7rwxw2lzcC3Zbxao7g==
+"@anthropic-ai/sdk@^0.16.1":
+  version "0.16.1"
+  resolved "https://registry.yarnpkg.com/@anthropic-ai/sdk/-/sdk-0.16.1.tgz#7472c42389d9a5323c20afa53995e1c3b922b95d"
+  integrity sha512-vHgvfWEyFy5ktqam56Nrhv8MVa7EJthsRYNi+1OrFFfyrj9tR2/aji1QbVbQjYU/pPhPFaYrdCEC/MLPFrmKwA==
   dependencies:
     "@types/node" "^18.11.18"
     "@types/node-fetch" "^2.6.4"