diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index 8f137805533..007980f2185 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -31,6 +31,7 @@ import APIPieLogo from "@/media/llmprovider/apipie.png";
import XAILogo from "@/media/llmprovider/xai.png";
import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
+import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -59,6 +60,7 @@ import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
+import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -128,13 +130,17 @@ export const AVAILABLE_LLM_PROVIDERS = [
requiredConfig: ["OllamaLLMBasePath"],
},
{
- name: "Novita AI",
- value: "novita",
- logo: NovitaLogo,
- options: (settings) => <NovitaLLMOptions settings={settings} />,
+ name: "Dell Pro AI Studio",
+ value: "dpais",
+ logo: DellProAiStudioLogo,
+ options: (settings) => <DellProAiStudioOptions settings={settings} />,
description:
- "Reliable, Scalable, and Cost-Effective for LLMs from Novita AI",
- requiredConfig: ["NovitaLLMApiKey"],
+ "Run powerful LLMs quickly on NPU powered by Dell Pro AI Studio.",
+ requiredConfig: [
+ "DellProAiStudioBasePath",
+ "DellProAiStudioModelPref",
+ "DellProAiStudioTokenLimit",
+ ],
},
{
name: "LM Studio",
@@ -153,6 +159,15 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "Run LLMs locally on your own machine.",
requiredConfig: ["LocalAiApiKey", "LocalAiBasePath", "LocalAiTokenLimit"],
},
+ {
+ name: "Novita AI",
+ value: "novita",
+ logo: NovitaLogo,
+ options: (settings) => <NovitaLLMOptions settings={settings} />,
+ description:
+ "Reliable, Scalable, and Cost-Effective for LLMs from Novita AI",
+ requiredConfig: ["NovitaLLMApiKey"],
+ },
{
name: "Together AI",
value: "togetherai",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index 40d8bfaaec7..bc48209da9c 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -37,6 +37,7 @@ import MilvusLogo from "@/media/vectordbs/milvus.png";
import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import PGVectorLogo from "@/media/vectordbs/pgvector.png";
+import DPAISLogo from "@/media/llmprovider/dpais.png";
import React, { useState, useEffect } from "react";
import paths from "@/utils/paths";
import { useNavigate } from "react-router-dom";
@@ -234,6 +235,13 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: PPIOLogo,
},
+ dpais: {
+ name: "Dell Pro AI Studio",
+ description: [
+ "Your model and chat contents are only accessible on the computer running Dell Pro AI Studio",
+ ],
+ logo: DPAISLogo,
+ },
};
export const VECTOR_DB_PRIVACY = {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 16f15675723..02d97893a79 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -26,6 +26,7 @@ import XAILogo from "@/media/llmprovider/xai.png";
import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
+import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@@ -53,6 +54,7 @@ import NovitaLLMOptions from "@/components/LLMSelection/NovitaLLMOptions";
import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
+import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import System from "@/models/system";
@@ -114,12 +116,12 @@ const LLMS = [
description: "Run LLMs locally on your own machine.",
},
{
- name: "Novita AI",
- value: "novita",
- logo: NovitaLogo,
- options: (settings) => <NovitaLLMOptions settings={settings} />,
+ name: "Dell Pro AI Studio",
+ value: "dpais",
+ logo: DellProAiStudioLogo,
+ options: (settings) => <DellProAiStudioOptions settings={settings} />,
description:
- "Reliable, Scalable, and Cost-Effective for LLMs from Novita AI",
+ "Run powerful LLMs quickly on NPU powered by Dell Pro AI Studio.",
},
{
name: "LM Studio",
@@ -136,6 +138,14 @@ const LLMS = [
options: (settings) => <LocalAiOptions settings={settings} />,
description: "Run LLMs locally on your own machine.",
},
+ {
+ name: "Novita AI",
+ value: "novita",
+ logo: NovitaLogo,
+ options: (settings) => <NovitaLLMOptions settings={settings} />,
+ description:
+ "Reliable, Scalable, and Cost-Effective for LLMs from Novita AI",
+ },
{
name: "KoboldCPP",
value: "koboldcpp",
diff --git a/frontend/src/utils/constants.js b/frontend/src/utils/constants.js
index 71f3048b72f..c6a44d2ae30 100644
--- a/frontend/src/utils/constants.js
+++ b/frontend/src/utils/constants.js
@@ -37,6 +37,13 @@ export const LOCALAI_COMMON_URLS = [
"http://172.17.0.1:8080/v1",
];
+export const DPAIS_COMMON_URLS = [
+ "http://127.0.0.1:8553/v1",
+ "http://0.0.0.0:8553/v1",
+ "http://localhost:8553/v1",
+ "http://host.docker.internal:8553/v1",
+];
+
export const NVIDIA_NIM_COMMON_URLS = [
"http://127.0.0.1:8000/v1/version",
"http://localhost:8000/v1/version",
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index 01fe0a743d5..89e397ea64e 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -574,6 +574,12 @@ const SystemSettings = {
// PPIO API keys
PPIOApiKey: !!process.env.PPIO_API_KEY,
PPIOModelPref: process.env.PPIO_MODEL_PREF,
+
+ // Dell Pro AI Studio Keys
+ DellProAiStudioBasePath: process.env.DPAIS_LLM_BASE_PATH,
+ DellProAiStudioModelPref: process.env.DPAIS_LLM_MODEL_PREF,
+ DellProAiStudioTokenLimit:
+ process.env.DPAIS_LLM_MODEL_TOKEN_LIMIT ?? 4096,
};
},
diff --git a/server/utils/agents/aibitat/index.js b/server/utils/agents/aibitat/index.js
index a479ba38a20..ff1aab45b06 100644
--- a/server/utils/agents/aibitat/index.js
+++ b/server/utils/agents/aibitat/index.js
@@ -797,6 +797,8 @@ ${this.getHistory({ to: route.to })
return new Providers.PPIOProvider({ model: config.model });
case "gemini":
return new Providers.GeminiProvider({ model: config.model });
+ case "dpais":
+ return new Providers.DellProAiStudioProvider({ model: config.model });
default:
throw new Error(
`Unknown provider: ${config.provider}. Please use a valid provider.`
diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js
index d98e49c34a6..d8c174862e4 100644
--- a/server/utils/agents/aibitat/providers/index.js
+++ b/server/utils/agents/aibitat/providers/index.js
@@ -22,6 +22,7 @@ const NovitaProvider = require("./novita.js");
const NvidiaNimProvider = require("./nvidiaNim.js");
const PPIOProvider = require("./ppio.js");
const GeminiProvider = require("./gemini.js");
+const DellProAiStudioProvider = require("./dellProAiStudio.js");
module.exports = {
OpenAIProvider,
@@ -48,4 +49,5 @@ module.exports = {
NvidiaNimProvider,
PPIOProvider,
GeminiProvider,
+ DellProAiStudioProvider,
};
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index dc30b29156c..915e5a59bed 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -189,6 +189,16 @@ class AgentHandler {
if (!process.env.GEMINI_API_KEY)
throw new Error("Gemini API key must be provided to use agents.");
break;
+ case "dpais":
+ if (!process.env.DPAIS_LLM_BASE_PATH)
+ throw new Error(
+ "Dell Pro AI Studio base path must be provided to use agents."
+ );
+ if (!process.env.DPAIS_LLM_MODEL_PREF)
+ throw new Error(
+ "Dell Pro AI Studio model must be set to use agents."
+ );
+ break;
default:
throw new Error(
@@ -256,6 +266,8 @@ class AgentHandler {
return process.env.PPIO_MODEL_PREF ?? "qwen/qwen2.5-32b-instruct";
case "gemini":
return process.env.GEMINI_LLM_MODEL_PREF ?? "gemini-2.0-flash-lite";
+ case "dpais":
+ return process.env.DPAIS_LLM_MODEL_PREF;
default:
return null;
}
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index c0c6533f948..cff97ff888f 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -32,6 +32,7 @@ const SUPPORT_CUSTOM_MODELS = [
"xai",
"gemini",
"ppio",
+ "dpais",
];
async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -81,6 +82,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await getGeminiModels(apiKey);
case "ppio":
return await getPPIOModels(apiKey);
+ case "dpais":
+ return await getDellProAiStudioModels(basePath);
default:
return { models: [], error: "Invalid provider for custom models" };
}
@@ -635,6 +638,43 @@ async function getPPIOModels() {
return { models, error: null };
}
+async function getDellProAiStudioModels(basePath = null) {
+ const { OpenAI: OpenAIApi } = require("openai");
+ try {
+ const { origin } = new URL(
+ basePath || process.env.DPAIS_LLM_BASE_PATH
+ );
+ const openai = new OpenAIApi({
+ baseURL: `${origin}/v1/openai`,
+ apiKey: null,
+ });
+ const models = await openai.models
+ .list()
+ .then((results) => results.data)
+ .then((models) => {
+ return models
+ .filter((model) => model.capability === "TextToText") // Only include text-to-text models for this handler
+ .map((model) => {
+ return {
+ id: model.id,
+ name: model.name,
+ organization: model.owned_by,
+ };
+ });
+ })
+ .catch((e) => {
+ throw new Error(e.message);
+ });
+ return { models, error: null };
+ } catch (e) {
+ console.error(`getDellProAiStudioModels`, e.message);
+ return {
+ models: [],
+ error: "Could not reach Dell Pro AI Studio from the provided base path",
+ };
+ }
+}
+
module.exports = {
getCustomModels,
};
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index cedbba2c654..a069b0dd3ca 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -203,6 +203,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "ppio":
const { PPIOLLM } = require("../AiProviders/ppio");
return new PPIOLLM(embedder, model);
+ case "dpais":
+ const { DellProAiStudioLLM } = require("../AiProviders/dellProAiStudio");
+ return new DellProAiStudioLLM(embedder, model);
default:
throw new Error(
`ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@@ -347,6 +350,9 @@ function getLLMProviderClass({ provider = null } = {}) {
case "ppio":
const { PPIOLLM } = require("../AiProviders/ppio");
return PPIOLLM;
+ case "dpais":
+ const { DellProAiStudioLLM } = require("../AiProviders/dellProAiStudio");
+ return DellProAiStudioLLM;
default:
return null;
}
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index cd43b8bfce1..b69c96417f6 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -262,6 +262,20 @@ const KEY_MAPPING = {
checks: [nonZero],
},
+ // Dell Pro AI Studio Settings
+ DellProAiStudioBasePath: {
+ envKey: "DPAIS_LLM_BASE_PATH",
+ checks: [isNotEmpty, validDockerizedUrl],
+ },
+ DellProAiStudioModelPref: {
+ envKey: "DPAIS_LLM_MODEL_PREF",
+ checks: [isNotEmpty],
+ },
+ DellProAiStudioTokenLimit: {
+ envKey: "DPAIS_LLM_MODEL_TOKEN_LIMIT",
+ checks: [nonZero],
+ },
+
EmbeddingEngine: {
envKey: "EMBEDDING_ENGINE",
checks: [supportedEmbeddingModel],
@@ -765,6 +779,7 @@ function supportedLLM(input = "") {
"xai",
"nvidia-nim",
"ppio",
+ "dpais",
].includes(input);
return validSelection ? null : `${input} is not a valid LLM provider.`;
}