Mirror of https://github.com/Mintplex-Labs/anything-llm.git (synced 2024-11-11 01:10:11 +01:00)
[FIX] Add max tokens field to generic OpenAI LLM connector (#1345)
* add max tokens field to generic openai llm connector
* add max_tokens property to generic openai agent provider
parent: 734c5a9e96
commit: 0a6a9e40c1
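For orientation, the change threads one setting end to end: a new Max Tokens input in the Generic OpenAI options UI, a GENERIC_OPEN_AI_MAX_TOKENS environment variable exposed through SystemSettings and validated in KEY_MAPPING, and a max_tokens parameter on every chat completion the LLM connector and agent provider issue. Below is a minimal standalone sketch of that pattern, not the committed code: the openai v4 client usage and GENERIC_OPEN_AI_* names follow the diff, GENERIC_OPEN_AI_BASE_PATH is an assumed endpoint variable, and the Number() coercion is an added assumption since env values are strings.

    const { OpenAI } = require("openai");

    // Any OpenAI-compatible endpoint; GENERIC_OPEN_AI_BASE_PATH is assumed here.
    const client = new OpenAI({
      baseURL: process.env.GENERIC_OPEN_AI_BASE_PATH,
      apiKey: process.env.GENERIC_OPEN_AI_API_KEY ?? null,
    });

    // process.env values are strings, so coerce before sending (an assumption;
    // the committed code passes the value through as-is with a 1024 fallback).
    const maxTokens = Number(process.env.GENERIC_OPEN_AI_MAX_TOKENS) || 1024;

    async function complete(messages, temperature = 0.7) {
      const result = await client.chat.completions.create({
        model: process.env.GENERIC_OPEN_AI_MODEL_PREF,
        messages,
        temperature,
        max_tokens: maxTokens, // the field this commit threads through
      });
      return result.choices?.[0]?.message?.content ?? null;
    }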
@@ -61,6 +61,21 @@ export default function GenericOpenAiOptions({ settings }) {
           autoComplete="off"
         />
       </div>
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Max Tokens
+        </label>
+        <input
+          type="number"
+          name="GenericOpenAiMaxTokens"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="Max tokens per request (eg: 1024)"
+          min={1}
+          defaultValue={settings?.GenericOpenAiMaxTokens || 1024}
+          required={true}
+          autoComplete="off"
+        />
+      </div>
     </div>
   );
 }
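Design note: the input's name attribute, GenericOpenAiMaxTokens, is what ties this field to the rest of the diff; the KEY_MAPPING entry added at the bottom maps that same key to the GENERIC_OPEN_AI_MAX_TOKENS environment variable, and min={1} on the input mirrors the server-side nonZero check.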
@@ -373,6 +373,7 @@ const SystemSettings = {
       GenericOpenAiModelPref: process.env.GENERIC_OPEN_AI_MODEL_PREF,
       GenericOpenAiTokenLimit: process.env.GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT,
       GenericOpenAiKey: !!process.env.GENERIC_OPEN_AI_API_KEY,
+      GenericOpenAiMaxTokens: process.env.GENERIC_OPEN_AI_MAX_TOKENS,

       // Cohere API Keys
       CohereApiKey: !!process.env.COHERE_API_KEY,
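Note the asymmetry here: GenericOpenAiKey is exposed as a boolean via !!, presumably so the secret itself never leaves the server, while GenericOpenAiMaxTokens is returned verbatim, which is what lets the frontend use it as the input's defaultValue.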
@@ -18,6 +18,7 @@ class GenericOpenAiLLM {
     });
     this.model =
       modelPreference ?? process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
+    this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024;
     if (!this.model)
       throw new Error("GenericOpenAI must have a valid model set.");
     this.limits = {
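One subtlety: process.env values are always strings, so whenever GENERIC_OPEN_AI_MAX_TOKENS is set, this.maxTokens holds a string like "1024" rather than a number; the ?? 1024 fallback only applies when the variable is undefined. A hardened variant (an assumption, not the committed line) would coerce explicitly:

    // Hypothetical alternative: coerce the env string to a number, falling
    // back to 1024 when the variable is unset or not numeric.
    this.maxTokens = Number(process.env.GENERIC_OPEN_AI_MAX_TOKENS) || 1024;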
@@ -94,6 +95,7 @@ class GenericOpenAiLLM {
         model: this.model,
         messages,
         temperature,
+        max_tokens: this.maxTokens,
       })
       .catch((e) => {
         throw new Error(e.response.data.error.message);
@@ -110,6 +112,7 @@
       stream: true,
       messages,
       temperature,
+      max_tokens: this.maxTokens,
     });
     return streamRequest;
   }
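The streaming call returns the SDK's async-iterable response. A minimal consumer sketch, assuming the openai v4 chat-completions stream shape where each chunk carries incremental text in choices[0].delta.content:

    // Hypothetical consumer of the streamRequest returned above.
    async function printStream(streamRequest) {
      for await (const chunk of streamRequest) {
        process.stdout.write(chunk.choices?.[0]?.delta?.content ?? "");
      }
    }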
@@ -24,6 +24,7 @@ class GenericOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
     this._client = client;
     this.model = model;
     this.verbose = true;
+    this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024;
   }

   get client() {
@@ -36,6 +37,7 @@ class GenericOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
         model: this.model,
         temperature: 0,
         messages,
+        max_tokens: this.maxTokens,
       })
       .then((result) => {
         if (!result.hasOwnProperty("choices"))
@@ -173,6 +173,10 @@ const KEY_MAPPING = {
     envKey: "GENERIC_OPEN_AI_API_KEY",
     checks: [],
   },
+  GenericOpenAiMaxTokens: {
+    envKey: "GENERIC_OPEN_AI_MAX_TOKENS",
+    checks: [nonZero],
+  },

   EmbeddingEngine: {
     envKey: "EMBEDDING_ENGINE",
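The nonZero check referenced here is presumably defined alongside KEY_MAPPING and is not shown in the diff. A plausible sketch of its contract, assuming each check takes the incoming value and returns an error string when invalid or null when valid:

    // Hypothetical validator in the style of KEY_MAPPING checks: null means
    // valid, a string is the error surfaced to the caller.
    function nonZero(input = "") {
      if (isNaN(Number(input))) return "Value must be a number";
      return Number(input) <= 0 ? "Value must be greater than zero" : null;
    }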