Mirror of https://github.com/Mintplex-Labs/anything-llm.git (synced 2024-11-14 02:20:12 +01:00)
[FEAT] Add support for more groq models (Llama 3 and Gemma) (#1143)
Adds Groq support for llama3-8b-8192, llama3-70b-8192, and gemma-7b-it alongside the existing llama2-70b-4096 and mixtral-8x7b-32768.
parent 7e3b8cd4fc
commit 897e168fd1
@@ -28,7 +28,13 @@ export default function GroqAiOptions({ settings }) {
           required={true}
           className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
         >
-          {["llama2-70b-4096", "mixtral-8x7b-32768"].map((model) => {
+          {[
+            "llama2-70b-4096",
+            "mixtral-8x7b-32768",
+            "llama3-8b-8192",
+            "llama3-70b-8192",
+            "gemma-7b-it",
+          ].map((model) => {
             return (
               <option key={model} value={model}>
                 {model}
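The frontend side of the change is mechanical: the inline two-model array feeding the model <select> becomes a five-model array, with each ID mapped to an <option>. A minimal standalone sketch of that pattern follows; GROQ_MODELS and GroqModelSelect are illustrative names, not the project's exact markup:

// Sketch only: mirrors the GroqAiOptions pattern of mapping a
// hard-coded list of Groq model IDs to <option> elements.
// GROQ_MODELS and GroqModelSelect are hypothetical names.
const GROQ_MODELS = [
  "llama2-70b-4096",
  "mixtral-8x7b-32768",
  "llama3-8b-8192",
  "llama3-70b-8192",
  "gemma-7b-it",
];

function GroqModelSelect({ value, onChange }) {
  return (
    <select value={value} onChange={onChange} required>
      {GROQ_MODELS.map((model) => (
        <option key={model} value={model}>
          {model}
        </option>
      ))}
    </select>
  );
}

Note that the same five IDs recur in the server-side default list and validator below, so all three lists have to be updated together.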
@@ -19,7 +19,13 @@ const PROVIDER_DEFAULT_MODELS = {
   localai: [],
   ollama: [],
   togetherai: [],
-  groq: ["llama2-70b-4096", "mixtral-8x7b-32768"],
+  groq: [
+    "llama2-70b-4096",
+    "mixtral-8x7b-32768",
+    "llama3-8b-8192",
+    "llama3-70b-8192",
+    "gemma-7b-it",
+  ],
   native: [],
 };
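PROVIDER_DEFAULT_MODELS is a static fallback list; localai, ollama, and togetherai map to empty arrays, presumably because their model lists are fetched at runtime rather than hard-coded. A small sketch of how such a map is typically consumed; defaultModelsFor is a hypothetical helper for illustration, not the project's actual code:

// Hypothetical consumer of a provider -> models map, for illustration.
function defaultModelsFor(provider, defaults) {
  // Providers with dynamic discovery have empty (or missing) entries,
  // so fall back to an empty list rather than failing.
  return defaults[provider] ?? [];
}

const defaults = {
  groq: [
    "llama2-70b-4096",
    "mixtral-8x7b-32768",
    "llama3-8b-8192",
    "llama3-70b-8192",
    "gemma-7b-it",
  ],
  native: [],
};
console.log(defaultModelsFor("groq", defaults).length); // 5
console.log(defaultModelsFor("ollama", defaults)); // []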
@@ -40,20 +40,31 @@ class GroqLLM {
   streamingEnabled() {
     return "streamChat" in this && "streamGetChatCompletion" in this;
   }

   promptWindowLimit() {
     switch (this.model) {
       case "llama2-70b-4096":
         return 4096;
       case "mixtral-8x7b-32768":
         return 32_768;
+      case "llama3-8b-8192":
+        return 8192;
+      case "llama3-70b-8192":
+        return 8192;
+      case "gemma-7b-it":
+        return 8192;
       default:
         return 4096;
     }
   }

   async isValidChatCompletionModel(modelName = "") {
-    const validModels = ["llama2-70b-4096", "mixtral-8x7b-32768"];
+    const validModels = [
+      "llama2-70b-4096",
+      "mixtral-8x7b-32768",
+      "llama3-8b-8192",
+      "llama3-70b-8192",
+      "gemma-7b-it",
+    ];
     const isPreset = validModels.some((model) => modelName === model);
     if (isPreset) return true;
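Both server-side helpers gate on the same five model IDs: all three additions (llama3-8b-8192, llama3-70b-8192, gemma-7b-it) report an 8,192-token context window, matching the suffix in their names. A runnable sketch of the same logic as a single lookup table instead of a switch plus an array; this is an equivalent illustrative rewrite, not the project's code, and it covers only the preset check (in the real method, non-preset names fall through to further validation after "if (isPreset) return true;"):

// Illustrative rewrite of GroqLLM.promptWindowLimit() and the preset
// check in isValidChatCompletionModel(), using one lookup table.
const GROQ_CONTEXT_WINDOWS = {
  "llama2-70b-4096": 4096,
  "mixtral-8x7b-32768": 32_768,
  "llama3-8b-8192": 8192,
  "llama3-70b-8192": 8192,
  "gemma-7b-it": 8192,
};

function promptWindowLimit(model) {
  // Unknown models fall back to 4096, as in the switch's default branch.
  return GROQ_CONTEXT_WINDOWS[model] ?? 4096;
}

function isPresetModel(modelName = "") {
  return modelName in GROQ_CONTEXT_WINDOWS;
}

console.log(promptWindowLimit("llama3-70b-8192")); // 8192
console.log(isPresetModel("gemma-7b-it")); // true
console.log(promptWindowLimit("some-fine-tune")); // 4096 (default)

Keeping the window limit and the valid-model list derived from one table would make it harder for the two to drift apart when the next batch of models lands.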