diff --git a/frontend/src/components/LLMSelection/BurnCloudOptions/index.jsx b/frontend/src/components/LLMSelection/BurnCloudOptions/index.jsx
new file mode 100644
index 00000000000..2632c4ba2bc
--- /dev/null
+++ b/frontend/src/components/LLMSelection/BurnCloudOptions/index.jsx
@@ -0,0 +1,172 @@
+import { useState, useEffect } from "react";
+import System from "@/models/system";
+
+export default function BurnCloudOptions({ settings }) {
+ const [inputValue, setInputValue] = useState(settings?.BurnCloudApiKey);
+ const [burnCloudApiKey, setBurnCloudApiKey] = useState(
+ settings?.BurnCloudApiKey
+ );
+
+  return (
+    <div className="w-full flex flex-col gap-y-7">
+      <div className="w-full flex items-center gap-[36px] mt-1.5">
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-3">
+            BurnCloud API Key
+          </label>
+          <input
+            type="password"
+            name="BurnCloudApiKey"
+            className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
+            placeholder="BurnCloud API Key"
+            defaultValue={settings?.BurnCloudApiKey ? "*".repeat(20) : ""}
+            required={true}
+            autoComplete="off"
+            spellCheck={false}
+            onChange={(e) => setInputValue(e.target.value)}
+            onBlur={() => setBurnCloudApiKey(inputValue)}
+          />
+        </div>
+        {!settings?.credentialsOnly && (
+          <BurnCloudModelSelection
+            settings={settings}
+            apiKey={burnCloudApiKey}
+          />
+        )}
+      </div>
+    </div>
+  );
+}
+
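+// Fallback model list rendered until the live lookup in
+// BurnCloudModelSelection below returns models from the BurnCloud API.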
+const DEFAULT_MODELS = [
+ // Claude Models
+ {
+ id: "claude-sonnet-4-20250514",
+ name: "Claude Sonnet 4",
+ },
+ {
+ id: "claude-3-7-sonnet-20250219",
+ name: "Claude 3.7 Sonnet",
+ },
+ {
+ id: "claude-3-5-sonnet-20241022",
+ name: "Claude 3.5 Sonnet",
+ },
+ // GPT Models
+ {
+ id: "gpt-4o",
+ name: "GPT-4o",
+ },
+ {
+ id: "gpt-4o-mini",
+ name: "GPT-4o Mini",
+ },
+ {
+ id: "o1",
+ name: "GPT-o1",
+ },
+ {
+ id: "gpt-4.5-preview",
+ name: "GPT-4.5 Preview",
+ },
+ {
+ id: "o1-mini",
+ name: "GPT-o1 Mini",
+ },
+ {
+ id: "gpt-image-1",
+ name: "GPT Image 1",
+ },
+ // Gemini Models
+ {
+ id: "gemini-2.5-pro-preview-05-06",
+ name: "Gemini 2.5 Pro Preview",
+ },
+ // DeepSeek Models
+ {
+ id: "deepseek-r1",
+ name: "DeepSeek R1",
+ },
+ {
+ id: "deepseek-v3",
+ name: "DeepSeek V3",
+ },
+];
+
+function BurnCloudModelSelection({ apiKey, settings }) {
+ const [models, setModels] = useState(DEFAULT_MODELS);
+ const [loading, setLoading] = useState(true);
+
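+  // `apiKey` is `true` when a key is already stored server-side; pass null in
+  // that case so the backend falls back to its saved credential.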
+ useEffect(() => {
+ async function findCustomModels() {
+ setLoading(true);
+ const { models } = await System.customModels(
+ "burncloud",
+ typeof apiKey === "boolean" ? null : apiKey
+ );
+ if (models.length > 0) setModels(models);
+ setLoading(false);
+ }
+ findCustomModels();
+ }, [apiKey]);
+
+  if (loading) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          Chat Model Selection
+        </label>
+        <select name="BurnCloudModelPref" disabled={true} className="border-none bg-theme-settings-input-bg text-white text-sm rounded-lg block w-full p-2.5">
+          <option disabled={true} selected={true}>
+            -- loading available models --
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-3">
+        Chat Model Selection
+      </label>
+      <select name="BurnCloudModelPref" required={true} className="border-none bg-theme-settings-input-bg text-white text-sm rounded-lg block w-full p-2.5">
+        {models.map((model) => (
+          <option key={model.id} value={model.id} selected={settings?.BurnCloudModelPref === model.id}>
+            {model.name}
+          </option>
+        ))}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/media/llmprovider/burncloud.png b/frontend/src/media/llmprovider/burncloud.png
new file mode 100644
index 00000000000..65748adfd49
Binary files /dev/null and b/frontend/src/media/llmprovider/burncloud.png differ
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index f9aceec189f..d75e8507bf0 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -32,6 +32,7 @@ import XAILogo from "@/media/llmprovider/xai.png";
import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
+import BurnCloudLogo from "@/media/llmprovider/burncloud.png";
import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -61,6 +62,7 @@ import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
+import BurnCloudOptions from "@/components/LLMSelection/BurnCloudOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -91,6 +93,15 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "A friendly AI Assistant hosted by Anthropic.",
requiredConfig: ["AnthropicApiKey"],
},
+ {
+ name: "BurnCloud",
+ value: "burncloud",
+ logo: BurnCloudLogo,
+    options: (settings) => <BurnCloudOptions settings={settings} />,
+ description:
+ "Multi-model AI platform supporting Claude, GPT, Gemini, and DeepSeek models.",
+ requiredConfig: ["BurnCloudApiKey"],
+ },
{
name: "Gemini",
value: "gemini",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index bc48209da9c..bc5dd51d695 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -5,6 +5,7 @@ import OpenAiLogo from "@/media/llmprovider/openai.png";
import GenericOpenAiLogo from "@/media/llmprovider/generic-openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import AnthropicLogo from "@/media/llmprovider/anthropic.png";
+import BurnCloudLogo from "@/media/llmprovider/burncloud.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
@@ -68,6 +69,14 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: AnthropicLogo,
},
+ burncloud: {
+ name: "BurnCloud",
+ description: [
+ "Your chats will not be used for training",
+ "Your prompts and document text used in response creation are visible to BurnCloud",
+ ],
+ logo: BurnCloudLogo,
+ },
gemini: {
name: "Google Gemini",
description: [
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 02d97893a79..b56f68f3839 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -4,6 +4,7 @@ import OpenAiLogo from "@/media/llmprovider/openai.png";
import GenericOpenAiLogo from "@/media/llmprovider/generic-openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import AnthropicLogo from "@/media/llmprovider/anthropic.png";
+import BurnCloudLogo from "@/media/llmprovider/burncloud.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
@@ -32,6 +33,7 @@ import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
+import BurnCloudOptions from "@/components/LLMSelection/BurnCloudOptions";
import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions";
import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
@@ -85,6 +87,14 @@ const LLMS = [
    options: (settings) => <AnthropicAiOptions settings={settings} />,
description: "A friendly AI Assistant hosted by Anthropic.",
},
+ {
+ name: "BurnCloud",
+ value: "burncloud",
+ logo: BurnCloudLogo,
+    options: (settings) => <BurnCloudOptions settings={settings} />,
+ description:
+ "Multi-model AI platform supporting Claude, GPT, Gemini, and DeepSeek models.",
+ },
{
name: "Gemini",
value: "gemini",
diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
index 6baae1ddee5..bcc0e26f164 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
@@ -9,6 +9,7 @@ import { useTranslation } from "react-i18next";
const ENABLED_PROVIDERS = [
"openai",
"anthropic",
+ "burncloud",
"lmstudio",
"ollama",
"localai",
diff --git a/server/endpoints/utils.js b/server/endpoints/utils.js
index cf4183251b8..8db1eacdc9c 100644
--- a/server/endpoints/utils.js
+++ b/server/endpoints/utils.js
@@ -76,6 +76,9 @@ function getModelTag() {
case "anthropic":
model = process.env.ANTHROPIC_MODEL_PREF;
break;
+ case "burncloud":
+ model = process.env.BURNCLOUD_MODEL_PREF;
+ break;
case "lmstudio":
model = process.env.LMSTUDIO_MODEL_PREF;
break;
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index bb7311fb857..48b73ec7eb7 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -461,6 +461,13 @@ const SystemSettings = {
AnthropicApiKey: !!process.env.ANTHROPIC_API_KEY,
AnthropicModelPref: process.env.ANTHROPIC_MODEL_PREF || "claude-2",
+ // BurnCloud Keys
+ BurnCloudApiKey: !!process.env.BURNCLOUD_API_KEY,
+ BurnCloudModelPref:
+ process.env.BURNCLOUD_MODEL_PREF || "claude-3-5-sonnet-20241022",
+ BurnCloudBaseUrl:
+ process.env.BURNCLOUD_BASE_URL || "https://ai.burncloud.com/v1",
+
// Gemini Keys
GeminiLLMApiKey: !!process.env.GEMINI_API_KEY,
GeminiLLMModelPref:
diff --git a/server/utils/AiProviders/burncloud/index.js b/server/utils/AiProviders/burncloud/index.js
new file mode 100644
index 00000000000..0367972ba95
--- /dev/null
+++ b/server/utils/AiProviders/burncloud/index.js
@@ -0,0 +1,200 @@
+const { v4 } = require("uuid");
+const {
+ formatChatHistory,
+ handleDefaultStreamResponseV2,
+} = require("../../helpers/chat/responses");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const { MODEL_MAP } = require("../modelMap");
+const {
+ LLMPerformanceMonitor,
+} = require("../../helpers/chat/LLMPerformanceMonitor");
+
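+// Expected environment (example values; BASE_URL and MODEL_PREF are optional
+// and shown here with their defaults):
+//   BURNCLOUD_API_KEY=sk-...
+//   BURNCLOUD_BASE_URL=https://ai.burncloud.com/v1
+//   BURNCLOUD_MODEL_PREF=claude-3-5-sonnet-20241022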
+class BurnCloudLLM {
+ constructor(embedder = null, modelPreference = null) {
+ if (!process.env.BURNCLOUD_API_KEY)
+ throw new Error("No BurnCloud API key was set.");
+
+ // Initialize BurnCloud client (using OpenAI-compatible interface)
+ const { OpenAI } = require("openai");
+ const burncloud = new OpenAI({
+ apiKey: process.env.BURNCLOUD_API_KEY,
+ baseURL: process.env.BURNCLOUD_BASE_URL || "https://ai.burncloud.com/v1",
+ });
+
+ this.burncloud = burncloud;
+ this.model =
+ modelPreference ||
+ process.env.BURNCLOUD_MODEL_PREF ||
+ "claude-3-5-sonnet-20241022";
+ this.limits = {
+ history: this.promptWindowLimit() * 0.15,
+ system: this.promptWindowLimit() * 0.15,
+ user: this.promptWindowLimit() * 0.7,
+ };
+
+ this.embedder = embedder ?? new NativeEmbedder();
+ this.defaultTemp = 0.7;
+ this.log(`Initialized with ${this.model}`);
+ }
+
+ log(text, ...args) {
+ console.log(`\x1b[36m[BurnCloudLLM]\x1b[0m ${text}`, ...args);
+ }
+
+ streamingEnabled() {
+ return "streamGetChatCompletion" in this;
+ }
+
+ static promptWindowLimit(modelName) {
+ // Map different model families to their context windows
+ if (modelName?.includes("claude-sonnet-4")) return 200000;
+ if (modelName?.includes("claude-3-7-sonnet")) return 200000;
+ if (modelName?.includes("claude-3-5-sonnet")) return 200000;
+ if (modelName?.includes("gpt-4o")) return 128000;
+ if (modelName?.includes("gpt-4")) return 128000;
+ if (modelName?.includes("o1")) return 200000;
+ if (modelName?.includes("gemini-2.5-pro")) return 2000000;
+ if (modelName?.includes("deepseek")) return 64000;
+ return 100000; // Default fallback
+ }
+
+ promptWindowLimit() {
+ return BurnCloudLLM.promptWindowLimit(this.model);
+ }
+
+ isValidChatCompletionModel(_modelName = "") {
+ return true;
+ }
+
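+  // Returns an OpenAI-style message array, roughly:
+  //   [{ role: "system", content: systemPrompt + context }, ...chatHistory,
+  //    { role: "user", content: userPrompt }]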
+ constructPrompt({
+ systemPrompt = "",
+ contextTexts = [],
+ chatHistory = [],
+ userPrompt = "",
+ }) {
+ const prompt = {
+ role: "system",
+ content: `${systemPrompt}${
+ contextTexts.length > 0
+ ? `\n\nContext:\n${contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")}`
+ : ""
+ }`,
+ };
+ return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+ }
+
+ async getChatCompletion(messages = null, { temperature = 0.7 } = {}) {
+ try {
+ const result = await LLMPerformanceMonitor.measureAsyncFunction(
+ this.burncloud.chat.completions.create({
+ model: this.model,
+ messages: this.#convertToOpenAIFormat(messages),
+ temperature: Number(temperature ?? this.defaultTemp),
+ max_tokens: 4096,
+ })
+ );
+
+ const choice = result.output.choices?.[0];
+ if (!choice) throw new Error("No valid completion response");
+
+ const promptTokens = result.output.usage?.prompt_tokens || 0;
+ const completionTokens = result.output.usage?.completion_tokens || 0;
+
+ return {
+ textResponse: choice.message?.content || "",
+ metrics: {
+ prompt_tokens: promptTokens,
+ completion_tokens: completionTokens,
+ total_tokens: promptTokens + completionTokens,
+ outputTps: completionTokens / result.duration,
+ duration: result.duration,
+ },
+ };
+ } catch (error) {
+ console.log(error);
+ return { textResponse: error.message, metrics: {} };
+ }
+ }
+
+ async streamGetChatCompletion(messages = null, { temperature = 0.7 } = {}) {
+ const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
+ this.burncloud.chat.completions.create({
+ model: this.model,
+ messages: this.#convertToOpenAIFormat(messages),
+ temperature: Number(temperature ?? this.defaultTemp),
+ max_tokens: 4096,
+ stream: true,
+ }),
+ messages,
+ false
+ );
+
+ return measuredStreamRequest;
+ }
+
+ /**
+ * Convert messages to OpenAI format
+ * @param {Array} messages - The messages array
+ * @returns {Array} - OpenAI formatted messages
+ */
+ #convertToOpenAIFormat(messages = []) {
+ if (!Array.isArray(messages) || messages.length === 0) return [];
+
+ // Handle system message separately for Anthropic-style models
+ const systemMessage = messages.find((msg) => msg.role === "system");
+ const otherMessages = messages.filter((msg) => msg.role !== "system");
+
+ const formattedMessages = otherMessages.map((msg) => ({
+ role: msg.role,
+ content: msg.content,
+ }));
+
+ // For OpenAI format, system message goes first
+ if (systemMessage) {
+ return [
+ { role: "system", content: systemMessage.content },
+ ...formattedMessages,
+ ];
+ }
+
+ return formattedMessages;
+ }
+
+ /**
+ * Handles the stream response from the BurnCloud API.
+ * @param {Object} response - the response object
+ * @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream - the stream response from the BurnCloud API w/tracking
+ * @param {Object} responseProps - the response properties
+   * @returns {Promise<string>}
+ */
+ handleStream(response, stream, responseProps) {
+ return handleDefaultStreamResponseV2(response, stream, responseProps);
+ }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    // constructPrompt returns a message array, so use the array compressor
+    // rather than the string compressor.
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+
+ // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+ async embedTextInput(textInput) {
+ return await this.embedder.embedTextInput(textInput);
+ }
+
+ async embedChunks(textChunks = []) {
+ return await this.embedder.embedChunks(textChunks);
+ }
+}
+
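+// Minimal usage sketch (assumes BURNCLOUD_API_KEY is set):
+//   const { BurnCloudLLM } = require("./utils/AiProviders/burncloud");
+//   const llm = new BurnCloudLLM();
+//   const messages = llm.constructPrompt({ systemPrompt: "Be brief.", userPrompt: "Hi" });
+//   const { textResponse } = await llm.getChatCompletion(messages);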
+module.exports = {
+ BurnCloudLLM,
+};
diff --git a/server/utils/AiProviders/modelMap/legacy.js b/server/utils/AiProviders/modelMap/legacy.js
index 2faf99dc237..7b685a0ffc5 100644
--- a/server/utils/AiProviders/modelMap/legacy.js
+++ b/server/utils/AiProviders/modelMap/legacy.js
@@ -15,6 +15,29 @@ const LEGACY_MODEL_MAP = {
"claude-3-7-sonnet-20250219": 200000,
"claude-3-7-sonnet-latest": 200000,
},
+ burncloud: {
+ // Claude Models
+ "claude-sonnet-4-20250514": 200000,
+ "claude-3-7-sonnet-20250219": 200000,
+ "claude-3-5-sonnet-20241022": 200000,
+
+ // GPT Models
+ "gpt-4o": 128000,
+ "gpt-4o-mini": 128000,
+ o1: 200000,
+ "gpt-4.5-preview": 128000,
+ "o1-mini": 128000,
+
+ // GPT Image Model
+ "gpt-image-1": 128000,
+
+ // Gemini Models
+ "gemini-2.5-pro-preview-05-06": 2000000,
+
+ // DeepSeek Models
+ "deepseek-r1": 64000,
+ "deepseek-v3": 64000,
+ },
cohere: {
"command-r": 128000,
"command-r-plus": 128000,
diff --git a/server/utils/agents/aibitat/providers/ai-provider.js b/server/utils/agents/aibitat/providers/ai-provider.js
index 5085fdcf5e1..fa56a1da82d 100644
--- a/server/utils/agents/aibitat/providers/ai-provider.js
+++ b/server/utils/agents/aibitat/providers/ai-provider.js
@@ -60,6 +60,15 @@ class Provider {
apiKey: process.env.ANTHROPIC_API_KEY,
...config,
});
+ case "burncloud":
+ return new ChatOpenAI({
+ configuration: {
+ baseURL:
+ process.env.BURNCLOUD_BASE_URL || "https://ai.burncloud.com/v1",
+ },
+ apiKey: process.env.BURNCLOUD_API_KEY,
+ ...config,
+ });
case "groq":
return new ChatOpenAI({
configuration: {
diff --git a/server/utils/agents/aibitat/providers/burncloud.js b/server/utils/agents/aibitat/providers/burncloud.js
new file mode 100644
index 00000000000..8dcf27a40d1
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/burncloud.js
@@ -0,0 +1,105 @@
+const { OpenAI } = require("openai");
+const { RetryError } = require("../error.js");
+const Provider = require("./ai-provider.js");
+
+/**
+ * The agent provider for the BurnCloud API.
+ * Using OpenAI-compatible interface for broad model support.
+ */
+class BurnCloudProvider extends Provider {
+ model;
+
+ constructor(config = {}) {
+ const {
+ options = {
+ apiKey: process.env.BURNCLOUD_API_KEY,
+ baseURL:
+ process.env.BURNCLOUD_BASE_URL || "https://ai.burncloud.com/v1",
+ },
+ model = "claude-3-5-sonnet-20241022",
+ } = config;
+
+ const client = new OpenAI(options);
+ super(client);
+ this.model = model;
+ }
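+
+  // Example: new BurnCloudProvider({ model: "gpt-4o" }). The `options` object
+  // falls back to the BURNCLOUD_* environment variables when not supplied.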
+
+ get requiredEnvs() {
+ return ["BURNCLOUD_API_KEY"];
+ }
+
+ /**
+ * Create a completion based on the received messages.
+ *
+ * @param messages A list of messages to send to the BurnCloud API.
+ * @param functions
+ * @returns The completion.
+ */
+ async complete(messages, functions = []) {
+ try {
+ const response = await this.client.chat.completions.create({
+ model: this.model,
+ messages: this.#cleanMsgs(messages),
+ ...(Array.isArray(functions) && functions?.length > 0
+ ? {
+ tools: functions.map((func) => ({
+ type: "function",
+ function: func,
+ })),
+ }
+ : {}),
+ });
+
+ // Handle tool/function calls
+      if (response.choices?.[0]?.message?.tool_calls?.length > 0) {
+ const toolCall = response.choices[0].message.tool_calls[0];
+ return {
+ result: null,
+ functionCall: {
+ name: toolCall.function.name,
+ arguments: JSON.parse(toolCall.function.arguments),
+ },
+ cost: 0,
+ };
+ }
+
+ return {
+ result:
+ response.choices?.[0]?.message?.content || "No response generated.",
+ cost: 0,
+ };
+ } catch (error) {
+ // Handle different types of errors appropriately
+ if (error?.status === 401) throw error;
+ if (error?.status === 429 || error?.status >= 500) {
+ throw new RetryError(error.message);
+ }
+ throw error;
+ }
+ }
+
+ /**
+ * Get the cost of the completion.
+ *
+   * @param _usage The usage metrics from the completion (unused).
+   * @returns {number} Always 0; stubbed since BurnCloud pricing varies by
+   * underlying model.
+ */
+ getCost(_usage) {
+ return 0;
+ }
+
+ /**
+ * Cleans the messages array by removing any invalid messages.
+ * @param {Array} messages - The messages array to clean.
+ * @returns {Array} The cleaned messages array.
+ */
+ #cleanMsgs(messages = []) {
+ return messages.map((msg) => {
+ if (typeof msg.content === "string") return msg;
+ return { role: msg.role, content: JSON.stringify(msg.content) };
+ });
+ }
+}
+
+module.exports = BurnCloudProvider;
diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js
index d8c174862e4..47e33e06f56 100644
--- a/server/utils/agents/aibitat/providers/index.js
+++ b/server/utils/agents/aibitat/providers/index.js
@@ -23,6 +23,7 @@ const NvidiaNimProvider = require("./nvidiaNim.js");
const PPIOProvider = require("./ppio.js");
const GeminiProvider = require("./gemini.js");
const DellProAiStudioProvider = require("./dellProAiStudio.js");
+const BurnCloudProvider = require("./burncloud.js");
module.exports = {
OpenAIProvider,
@@ -50,4 +51,5 @@ module.exports = {
PPIOProvider,
GeminiProvider,
DellProAiStudioProvider,
+ BurnCloudProvider,
};
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index 915e5a59bed..1eebcfd8478 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -199,6 +199,10 @@ class AgentHandler {
"Dell Pro AI Studio model must be set to use agents."
);
break;
+ case "burncloud":
+ if (!process.env.BURNCLOUD_MODEL_PREF)
+ throw new Error("BurnCloud model must be provided to use agents.");
+ break;
default:
throw new Error(
@@ -219,6 +223,8 @@ class AgentHandler {
return process.env.OPEN_MODEL_PREF ?? "gpt-4o";
case "anthropic":
return process.env.ANTHROPIC_MODEL_PREF ?? "claude-3-sonnet-20240229";
+ case "burncloud":
+ return process.env.BURNCLOUD_MODEL_PREF ?? "claude-3-5-sonnet-20241022";
case "lmstudio":
return process.env.LMSTUDIO_MODEL_PREF ?? "server-default";
case "ollama":
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index cff97ff888f..2ba24abb8bf 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -13,6 +13,7 @@ const { GeminiLLM } = require("../AiProviders/gemini");
const SUPPORT_CUSTOM_MODELS = [
"openai",
"anthropic",
+ "burncloud",
"localai",
"ollama",
"togetherai",
@@ -84,6 +85,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await getPPIOModels(apiKey);
case "dpais":
return await getDellProAiStudioModels(basePath);
+ case "burncloud":
+ return await burncloudModels(apiKey);
default:
return { models: [], error: "Invalid provider for custom models" };
}
@@ -675,6 +678,65 @@ async function getDellProAiStudioModels(basePath = null) {
}
}
+async function burncloudModels(_apiKey = null) {
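+  // The settings UI sends `true` when a key is already saved server-side, so
+  // treat a boolean as "reuse the stored BURNCLOUD_API_KEY".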
+ const apiKey =
+ _apiKey === true
+ ? process.env.BURNCLOUD_API_KEY
+ : _apiKey || process.env.BURNCLOUD_API_KEY || null;
+
+ if (!apiKey) return { models: [], error: "No API key provided" };
+
+ try {
+ const { OpenAI } = require("openai");
+ const burncloud = new OpenAI({
+ apiKey,
+ baseURL: process.env.BURNCLOUD_BASE_URL || "https://ai.burncloud.com/v1",
+ });
+
+ const models = await burncloud.models
+ .list()
+ .then((results) => results.data)
+ .then((models) => {
+ return models
+ .filter((model) => model.object === "model")
+ .map((model) => {
+ return {
+ id: model.id,
+ name: model.id, // Use model ID as display name
+ };
+ });
+ })
+ .catch((e) => {
+ console.error(`BurnCloud:listModels`, e.message);
+ // Return default models if API call fails
+ return [
+ { id: "claude-sonnet-4-20250514", name: "Claude Sonnet 4" },
+ { id: "claude-3-7-sonnet-20250219", name: "Claude 3.7 Sonnet" },
+ { id: "claude-3-5-sonnet-20241022", name: "Claude 3.5 Sonnet" },
+ { id: "gpt-4o", name: "GPT-4o" },
+ { id: "gpt-4o-mini", name: "GPT-4o Mini" },
+ { id: "o1", name: "GPT-o1" },
+ { id: "gpt-4.5-preview", name: "GPT-4.5 Preview" },
+ { id: "o1-mini", name: "GPT-o1 Mini" },
+ { id: "gpt-image-1", name: "GPT Image 1" },
+ {
+ id: "gemini-2.5-pro-preview-05-06",
+ name: "Gemini 2.5 Pro Preview",
+ },
+ { id: "deepseek-r1", name: "DeepSeek R1" },
+ { id: "deepseek-v3", name: "DeepSeek V3" },
+ ];
+ });
+
+    // The API key was valid, so persist it for future requests.
+ if (models.length > 0 && !!apiKey) process.env.BURNCLOUD_API_KEY = apiKey;
+ return { models, error: null };
+ } catch (error) {
+ console.error(`BurnCloud:listModels`, error.message);
+ return { models: [], error: error.message };
+ }
+}
+
module.exports = {
getCustomModels,
};
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 2017c618fac..2432212c6c5 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -134,6 +134,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "anthropic":
const { AnthropicLLM } = require("../AiProviders/anthropic");
return new AnthropicLLM(embedder, model);
+ case "burncloud":
+ const { BurnCloudLLM } = require("../AiProviders/burncloud");
+ return new BurnCloudLLM(embedder, model);
case "gemini":
const { GeminiLLM } = require("../AiProviders/gemini");
return new GeminiLLM(embedder, model);
@@ -281,6 +284,9 @@ function getLLMProviderClass({ provider = null } = {}) {
case "anthropic":
const { AnthropicLLM } = require("../AiProviders/anthropic");
return AnthropicLLM;
+ case "burncloud":
+ const { BurnCloudLLM } = require("../AiProviders/burncloud");
+ return BurnCloudLLM;
case "gemini":
const { GeminiLLM } = require("../AiProviders/gemini");
return GeminiLLM;
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index f209ef450cc..1e5fd68edde 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -59,6 +59,20 @@ const KEY_MAPPING = {
checks: [isNotEmpty],
},
+ // BurnCloud Settings
+ BurnCloudApiKey: {
+ envKey: "BURNCLOUD_API_KEY",
+ checks: [isNotEmpty],
+ },
+ BurnCloudModelPref: {
+ envKey: "BURNCLOUD_MODEL_PREF",
+ checks: [isNotEmpty],
+ },
+ BurnCloudBaseUrl: {
+ envKey: "BURNCLOUD_BASE_URL",
+ checks: [isValidURL],
+ },
+
GeminiLLMApiKey: {
envKey: "GEMINI_API_KEY",
checks: [isNotEmpty],
@@ -756,6 +770,7 @@ function supportedLLM(input = "") {
"openai",
"azure",
"anthropic",
+ "burncloud",
"gemini",
"lmstudio",
"localai",