Mirror of https://github.com/Mintplex-Labs/anything-llm.git, synced 2024-11-10 17:00:11 +01:00.
[FEAT] Support for gemini-1.0-pro model and fixes to prompt window limit (#1557)
support for gemini-1.0-pro model and fixes to prompt window limit
Parent: c24b79c9d1
Commit: 3f78ef413b
@@ -32,6 +32,7 @@ export default function GeminiLLMOptions({ settings }) {
         >
           {[
             "gemini-pro",
+            "gemini-1.0-pro",
             "gemini-1.5-pro-latest",
             "gemini-1.5-flash-latest",
           ].map((model) => {
@@ -10,7 +10,7 @@ export const DISABLED_PROVIDERS = [
 ];
 const PROVIDER_DEFAULT_MODELS = {
   openai: [],
-  gemini: ["gemini-pro", "gemini-1.5-pro-latest", "gemini-1.5-flash-latest"],
+  gemini: ["gemini-pro","gemini-1.0-pro", "gemini-1.5-pro-latest", "gemini-1.5-flash-latest"],
   anthropic: [
     "claude-instant-1.2",
     "claude-2.0",
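As a quick illustration of how a provider-to-default-models map like the one updated above is typically consumed, here is a minimal, self-contained sketch. The `defaultModelsFor` helper and its empty-array fallback are assumptions made for the example, not code from this repository.

```js
// Illustrative sketch only: the map mirrors the hunk above, but the
// lookup helper and its fallback behaviour are assumptions.
const PROVIDER_DEFAULT_MODELS = {
  openai: [],
  gemini: [
    "gemini-pro",
    "gemini-1.0-pro",
    "gemini-1.5-pro-latest",
    "gemini-1.5-flash-latest",
  ],
};

// Return the static default model list for a provider, or an empty list
// when the provider has no static defaults (or is unknown).
function defaultModelsFor(provider = "") {
  return PROVIDER_DEFAULT_MODELS[provider] ?? [];
}

console.log(defaultModelsFor("gemini")); // now includes "gemini-1.0-pro"
console.log(defaultModelsFor("openai")); // []
```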
@@ -91,6 +91,10 @@ class GeminiLLM {
     switch (this.model) {
       case "gemini-pro":
         return 30_720;
+      case "gemini-1.0-pro":
+        return 30_720;
+      case "gemini-1.5-flash-latest":
+        return 1_048_576;
       case "gemini-1.5-pro-latest":
         return 1_048_576;
       default:
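For context, a minimal, runnable sketch of the per-model context-window lookup this hunk extends. The method name `promptWindowLimit` is inferred from the commit title, and the constructor and default-branch value are assumptions for illustration; only the model names and limits come from the diff.

```js
// Sketch of the per-model context window lookup after this change.
// Class shape, constructor, and default return value are assumptions;
// the model names and window sizes mirror the diff above.
class GeminiLLM {
  constructor(model = "gemini-pro") {
    this.model = model;
  }

  promptWindowLimit() {
    switch (this.model) {
      case "gemini-pro":
        return 30_720;
      case "gemini-1.0-pro":
        return 30_720;
      case "gemini-1.5-flash-latest":
        return 1_048_576;
      case "gemini-1.5-pro-latest":
        return 1_048_576;
      default:
        return 30_720; // assumed conservative fallback
    }
  }
}

console.log(new GeminiLLM("gemini-1.5-flash-latest").promptWindowLimit()); // 1048576
console.log(new GeminiLLM("gemini-1.0-pro").promptWindowLimit()); // 30720
```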
@@ -101,6 +105,7 @@ class GeminiLLM {
   isValidChatCompletionModel(modelName = "") {
     const validModels = [
       "gemini-pro",
+      "gemini-1.0-pro",
       "gemini-1.5-pro-latest",
       "gemini-1.5-flash-latest",
     ];
@@ -532,6 +532,7 @@ function supportedTranscriptionProvider(input = "") {
 function validGeminiModel(input = "") {
   const validModels = [
     "gemini-pro",
+    "gemini-1.0-pro",
     "gemini-1.5-pro-latest",
     "gemini-1.5-flash-latest",
   ];
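Finally, a small sketch of how a whitelist check like `validGeminiModel` can gate a configuration update. The boolean return and the surrounding `setGeminiModel` usage are assumptions for the example, since the diff only shows the allowed-model list.

```js
// Sketch only: the model list mirrors the hunk above; returning a boolean
// and throwing on invalid input are assumptions for this example.
function validGeminiModel(input = "") {
  const validModels = [
    "gemini-pro",
    "gemini-1.0-pro",
    "gemini-1.5-pro-latest",
    "gemini-1.5-flash-latest",
  ];
  return validModels.includes(input);
}

// Hypothetical caller that rejects unsupported model names up front.
function setGeminiModel(input) {
  if (!validGeminiModel(input)) {
    throw new Error(`Unsupported Gemini model: ${input}`);
  }
  return input;
}

console.log(setGeminiModel("gemini-1.0-pro")); // "gemini-1.0-pro"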