[FIX] Add max tokens field to generic OpenAI LLM connector (#1345)

* add max tokens field to generic openai llm connector

* add max_tokens property to generic openai agent provider
Sean Hatfield 2024-05-10 14:49:02 -07:00 committed by GitHub
parent 734c5a9e96
commit 0a6a9e40c1
5 changed files with 25 additions and 0 deletions
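
To use the new limit, set the corresponding variable in the server's .env; the nonZero check added in this commit requires a positive number, and the connector falls back to 1024 when it is unset (the value below is illustrative):

GENERIC_OPEN_AI_MAX_TOKENS=1024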

View File

@@ -61,6 +61,21 @@ export default function GenericOpenAiOptions({ settings }) {
           autoComplete="off"
         />
       </div>
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Max Tokens
+        </label>
+        <input
+          type="number"
+          name="GenericOpenAiMaxTokens"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="Max tokens per request (eg: 1024)"
+          min={1}
+          defaultValue={settings?.GenericOpenAiMaxTokens || 1024}
+          required={true}
+          autoComplete="off"
+        />
+      </div>
     </div>
   );
 }
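
Because the input is type="number" with min={1} and required, the browser's built-in form validation already rejects empty or non-positive values; the name GenericOpenAiMaxTokens is what links this field to the GENERIC_OPEN_AI_MAX_TOKENS env key via the KEY_MAPPING entry in the last file of this commit, where the nonZero check re-validates the value server-side.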

View File

@ -373,6 +373,7 @@ const SystemSettings = {
GenericOpenAiModelPref: process.env.GENERIC_OPEN_AI_MODEL_PREF, GenericOpenAiModelPref: process.env.GENERIC_OPEN_AI_MODEL_PREF,
GenericOpenAiTokenLimit: process.env.GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT, GenericOpenAiTokenLimit: process.env.GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT,
GenericOpenAiKey: !!process.env.GENERIC_OPEN_AI_API_KEY, GenericOpenAiKey: !!process.env.GENERIC_OPEN_AI_API_KEY,
GenericOpenAiMaxTokens: process.env.GENERIC_OPEN_AI_MAX_TOKENS,
// Cohere API Keys // Cohere API Keys
CohereApiKey: !!process.env.COHERE_API_KEY, CohereApiKey: !!process.env.COHERE_API_KEY,
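
Note the deliberate asymmetry here: GenericOpenAiKey is masked to a boolean with !! because it is a secret, while GenericOpenAiMaxTokens is returned verbatim so the frontend can prefill the field with the actual value.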

View File

@@ -18,6 +18,7 @@ class GenericOpenAiLLM {
     });
     this.model =
       modelPreference ?? process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
+    this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024;
     if (!this.model)
       throw new Error("GenericOpenAI must have a valid model set.");
     this.limits = {
@@ -94,6 +95,7 @@
         model: this.model,
         messages,
         temperature,
+        max_tokens: this.maxTokens,
       })
       .catch((e) => {
         throw new Error(e.response.data.error.message);
@@ -110,6 +112,7 @@
       stream: true,
       messages,
       temperature,
+      max_tokens: this.maxTokens,
     });
     return streamRequest;
   }

View File

@@ -24,6 +24,7 @@ class GenericOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
     this._client = client;
     this.model = model;
     this.verbose = true;
+    this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024;
   }

   get client() {
@@ -36,6 +37,7 @@
         model: this.model,
         temperature: 0,
         messages,
+        max_tokens: this.maxTokens,
       })
       .then((result) => {
         if (!result.hasOwnProperty("choices"))
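
With this, both the chat connector and the agent provider send the same cap. For reference, a minimal standalone sketch of the equivalent request using the openai npm client (the baseURL here is a hypothetical OpenAI-compatible endpoint, not something from this commit):

import OpenAI from "openai";

const client = new OpenAI({
  baseURL: "http://localhost:8000/v1", // hypothetical OpenAI-compatible server
  apiKey: process.env.GENERIC_OPEN_AI_API_KEY,
});

// max_tokens caps each completion; this connector defaults the cap to 1024.
const response = await client.chat.completions.create({
  model: process.env.GENERIC_OPEN_AI_MODEL_PREF,
  messages: [{ role: "user", content: "Hello!" }],
  max_tokens: Number(process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024),
});
console.log(response.choices[0]?.message?.content);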

View File

@@ -173,6 +173,10 @@ const KEY_MAPPING = {
     envKey: "GENERIC_OPEN_AI_API_KEY",
     checks: [],
   },
+  GenericOpenAiMaxTokens: {
+    envKey: "GENERIC_OPEN_AI_MAX_TOKENS",
+    checks: [nonZero],
+  },
   EmbeddingEngine: {
     envKey: "EMBEDDING_ENGINE",