diff --git a/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx b/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx
index 9f6f38f2718..c9b38ed275e 100644
--- a/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx
+++ b/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx
@@ -1,4 +1,10 @@
+import { useState, useEffect } from "react";
+import System from "@/models/system";
+
export default function GroqAiOptions({ settings }) {
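+  // inputValue tracks keystrokes; apiKey is only committed onBlur so the
+  // model list below is not refetched on every keypress.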
+  const [inputValue, setInputValue] = useState(settings?.GroqApiKey);
+  const [apiKey, setApiKey] = useState(settings?.GroqApiKey);
+
return (
@@ -8,41 +14,98 @@ export default function GroqAiOptions({ settings }) {
+          onChange={(e) => setInputValue(e.target.value)}
+          onBlur={() => setApiKey(inputValue)}
         />
       </div>
       {!settings?.credentialsOnly && (
-        <div>
-          {/* ...static <select> of hardcoded Groq models... */}
-        </div>
+        <GroqAIModelSelection settings={settings} apiKey={apiKey} />
       )}
     </div>
   );
 }
+
+function GroqAIModelSelection({ apiKey, settings }) {
+  const [customModels, setCustomModels] = useState([]);
+  const [loading, setLoading] = useState(true);
+
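+  // Refetch the account's available models whenever a new key is committed.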
+  useEffect(() => {
+    async function findCustomModels() {
+      if (!apiKey) {
+        setCustomModels([]);
+        setLoading(true);
+        return;
+      }
+
+      try {
+        setLoading(true);
+        const { models } = await System.customModels("groq", apiKey);
+        setCustomModels(models || []);
+      } catch (error) {
+        console.error("Failed to fetch custom models:", error);
+        setCustomModels([]);
+      } finally {
+        setLoading(false);
+      }
+    }
+    findCustomModels();
+  }, [apiKey]);
+
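+  // Until a key is committed (or while a fetch is in flight), render a
+  // disabled placeholder select.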
+  if (loading) {
+    return (
+      <div>
+        <label>Chat Model Selection</label>
+        <select name="GroqModelPref" disabled={true}>
+          <option disabled={true} selected={true}>
+            -- loading available models --
+          </option>
+        </select>
+        <p>
+          Enter a valid API key to view all available models for your account.
+        </p>
+      </div>
+    );
+  }
+
+  return (
+    <div>
+      <label>Chat Model Selection</label>
+      <select name="GroqModelPref" required={true}>
+        {customModels.length > 0 && (
+          <optgroup label="Available models">
+            {customModels.map((model) => (
+              <option
+                key={model.id}
+                value={model.id}
+                selected={settings?.GroqModelPref === model.id}
+              >
+                {model.id}
+              </option>
+            ))}
+          </optgroup>
+        )}
+      </select>
+      <p>
+        Select the GroqAI model you want to use for your conversations.
+      </p>
+    </div>
+  );
+}
diff --git a/frontend/src/hooks/useGetProvidersModels.js b/frontend/src/hooks/useGetProvidersModels.js
index e118f5e742b..064ad17c889 100644
--- a/frontend/src/hooks/useGetProvidersModels.js
+++ b/frontend/src/hooks/useGetProvidersModels.js
@@ -32,12 +32,7 @@ const PROVIDER_DEFAULT_MODELS = {
   localai: [],
   ollama: [],
   togetherai: [],
-  groq: [
-    "mixtral-8x7b-32768",
-    "llama3-8b-8192",
-    "llama3-70b-8192",
-    "gemma-7b-it",
-  ],
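+  // Groq models are no longer hardcoded; they are fetched dynamically
+  // through the custom-models endpoint.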
+  groq: [],
   native: [],
   cohere: [
     "command-r",
diff --git a/server/utils/AiProviders/groq/index.js b/server/utils/AiProviders/groq/index.js
index 067c60c7acb..ccfc647a041 100644
--- a/server/utils/AiProviders/groq/index.js
+++ b/server/utils/AiProviders/groq/index.js
@@ -13,7 +13,7 @@ class GroqLLM {
       apiKey: process.env.GROQ_API_KEY,
     });
     this.model =
-      modelPreference || process.env.GROQ_MODEL_PREF || "llama3-8b-8192";
+      modelPreference || process.env.GROQ_MODEL_PREF || "llama-3.1-8b-instant";
     this.limits = {
       history: this.promptWindowLimit() * 0.15,
       system: this.promptWindowLimit() * 0.15,
@@ -42,34 +42,24 @@ class GroqLLM {
   promptWindowLimit() {
     switch (this.model) {
-      case "mixtral-8x7b-32768":
-        return 32_768;
-      case "llama3-8b-8192":
-        return 8192;
-      case "llama3-70b-8192":
-        return 8192;
+      case "gemma2-9b-it":
       case "gemma-7b-it":
+      case "llama3-70b-8192":
+      case "llama3-8b-8192":
         return 8192;
+      case "llama-3.1-70b-versatile":
+      case "llama-3.1-8b-instant":
+        return 131072;
+      case "mixtral-8x7b-32768":
+        return 32768;
       default:
         return 8192;
     }
   }
   async isValidChatCompletionModel(modelName = "") {
-    const validModels = [
-      "mixtral-8x7b-32768",
-      "llama3-8b-8192",
-      "llama3-70b-8192",
-      "gemma-7b-it",
-    ];
-    const isPreset = validModels.some((model) => modelName === model);
-    if (isPreset) return true;
-
-    const model = await this.openai.models
-      .retrieve(modelName)
-      .then((modelObj) => modelObj)
-      .catch(() => null);
-    return !!model;
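+    // Models are now discovered live from the /models endpoint, so any
+    // non-empty name is accepted.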
+    return !!modelName; // name just needs to exist
   }
constructPrompt({
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index 31a3eb2c029..27afa150f5a 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -1,7 +1,4 @@
-const {
-  OpenRouterLLM,
-  fetchOpenRouterModels,
-} = require("../AiProviders/openRouter");
+const { fetchOpenRouterModels } = require("../AiProviders/openRouter");
const { perplexityModels } = require("../AiProviders/perplexity");
const { togetherAiModels } = require("../AiProviders/togetherAi");
const { ElevenLabsTTS } = require("../TextToSpeech/elevenLabs");
@@ -18,6 +15,7 @@ const SUPPORT_CUSTOM_MODELS = [
"koboldcpp",
"litellm",
"elevenlabs-tts",
+ "groq",
];
async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -49,6 +47,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
       return await liteLLMModels(basePath, apiKey);
     case "elevenlabs-tts":
       return await getElevenLabsModels(apiKey);
+    case "groq":
+      return await getGroqAiModels(apiKey);
     default:
       return { models: [], error: "Invalid provider for custom models" };
}
@@ -167,6 +167,33 @@ async function localAIModels(basePath = null, apiKey = null) {
   return { models, error: null };
 }
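+// Lists the models available to the supplied (or stored) key via Groq's
+// OpenAI-compatible /models endpoint.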
+async function getGroqAiModels(_apiKey = null) {
+  const { OpenAI: OpenAIApi } = require("openai");
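+  // _apiKey === true means "reuse the key already saved in the env".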
+  const apiKey =
+    _apiKey === true
+      ? process.env.GROQ_API_KEY
+      : _apiKey || process.env.GROQ_API_KEY || null;
+  const openai = new OpenAIApi({
+    baseURL: "https://api.groq.com/openai/v1",
+    apiKey,
+  });
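+  // The listing mixes modalities, so drop Whisper (audio) and tool-use
+  // variants; only chat models are usable here.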
+  const models = (
+    await openai.models
+      .list()
+      .then((results) => results.data)
+      .catch((e) => {
+        console.error(`GroqAi:listModels`, e.message);
+        return [];
+      })
+  ).filter(
+    (model) => !model.id.includes("whisper") && !model.id.includes("tool-use")
+  );
+
+  // The API key worked, so persist it for future requests.
+  if (models.length > 0 && !!apiKey) process.env.GROQ_API_KEY = apiKey;
+  return { models, error: null };
+}
+
async function liteLLMModels(basePath = null, apiKey = null) {
const { OpenAI: OpenAIApi } = require("openai");
const openai = new OpenAIApi({