mirror of
https://github.com/Mintplex-Labs/anything-llm.git
synced 2024-11-10 17:00:11 +01:00
Support dynamic context length - VoyageAI (#1489)
This commit is contained in:
parent
a256db132d
commit
8c5a30db9d
@ -394,16 +394,17 @@ export default function GeneralLLMPreference() {
|
||||
>
|
||||
<div className="flex gap-x-4 items-center">
|
||||
<img
|
||||
src={selectedLLMObject.logo}
|
||||
alt={`${selectedLLMObject.name} logo`}
|
||||
src={selectedLLMObject?.logo || AnythingLLMIcon}
|
||||
alt={`${selectedLLMObject?.name} logo`}
|
||||
className="w-10 h-10 rounded-md"
|
||||
/>
|
||||
<div className="flex flex-col text-left">
|
||||
<div className="text-sm font-semibold text-white">
|
||||
{selectedLLMObject.name}
|
||||
{selectedLLMObject?.name || "None selected"}
|
||||
</div>
|
||||
<div className="mt-1 text-xs text-[#D2D5DB]">
|
||||
{selectedLLMObject.description}
|
||||
{selectedLLMObject?.description ||
|
||||
"You need to select an LLM"}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
@ -15,7 +15,22 @@ class VoyageAiEmbedder {
|
||||
|
||||
// Limit of how many strings we can process in a single pass to stay within resource or network limits
|
||||
this.batchSize = 128; // Voyage AI's limit per request is 128 https://docs.voyageai.com/docs/rate-limits#use-larger-batches
|
||||
this.embeddingMaxChunkLength = 4000; // https://docs.voyageai.com/docs/embeddings - assume a token is roughly 4 letters with some padding
|
||||
this.embeddingMaxChunkLength = this.#getMaxEmbeddingLength();
|
||||
}
|
||||
|
||||
// https://docs.voyageai.com/docs/embeddings
|
||||
#getMaxEmbeddingLength() {
|
||||
switch (this.model) {
|
||||
case "voyage-large-2-instruct":
|
||||
case "voyage-law-2":
|
||||
case "voyage-code-2":
|
||||
case "voyage-large-2":
|
||||
return 16_000;
|
||||
case "voyage-2":
|
||||
return 4_000;
|
||||
default:
|
||||
return 4_000;
|
||||
}
|
||||
}
|
||||
|
||||
async embedTextInput(textInput) {
|
||||
|
Loading…
Reference in New Issue
Block a user