diff --git a/README.md b/README.md
index 65c529b1ab..a43e0bcada 100644
--- a/README.md
+++ b/README.md
@@ -102,6 +102,7 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
- [Moonshot AI](https://www.moonshot.ai/)
+- [Jan AI](https://jan.ai/)
**Embedder models:**
diff --git a/docker/.env.example b/docker/.env.example
index 7244bdff12..4d0619ba7e 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -137,6 +137,12 @@ GID='1000'
# MOONSHOT_AI_API_KEY='your-moonshot-api-key-here'
# MOONSHOT_AI_MODEL_PREF='moonshot-v1-32k'
+# LLM_PROVIDER='janai'
+# JAN_AI_API_KEY='your-jan-ai-api-key-here'
+# JAN_AI_BASE_PATH='http://127.0.0.1:1337/v1'
+# JAN_AI_MODEL_PREF='gemma3:1b'
+# JAN_AI_MODEL_TOKEN_LIMIT=4096
+
###########################################
######## Embedding API SElECTION ##########
###########################################
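Reviewer note: the values above assume Jan AI's default local server, which exposes an OpenAI-compatible API on port 1337. A quick way to confirm the endpoint is reachable before pointing AnythingLLM at it; the key and model id below are placeholders, not values this PR ships:

```js
// Connectivity sanity check against a local Jan AI server (Node 18+, built-in fetch).
// JAN_AI_BASE_PATH / JAN_AI_API_KEY here are illustrative placeholders.
const BASE = process.env.JAN_AI_BASE_PATH || "http://127.0.0.1:1337/v1";

async function checkJanAi() {
  const res = await fetch(`${BASE}/models`, {
    headers: { Authorization: `Bearer ${process.env.JAN_AI_API_KEY}` },
  });
  if (!res.ok) throw new Error(`Jan AI unreachable: HTTP ${res.status}`);
  const { data } = await res.json();
  console.log(data.map((m) => m.id)); // e.g. ["gemma3:1b"]
}

checkJanAi().catch(console.error);
```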
diff --git a/frontend/src/components/LLMSelection/JanAiOptions/index.jsx b/frontend/src/components/LLMSelection/JanAiOptions/index.jsx
new file mode 100644
index 0000000000..eea5e67868
--- /dev/null
+++ b/frontend/src/components/LLMSelection/JanAiOptions/index.jsx
@@ -0,0 +1,193 @@
+import { useState, useEffect } from "react";
+import System from "@/models/system";
+import { CaretDown, CaretUp } from "@phosphor-icons/react";
+import useProviderEndpointAutoDiscovery from "@/hooks/useProviderEndpointAutoDiscovery";
+import { JAN_AI_COMMON_URLS } from "@/utils/constants";
+
+export default function JanAiOptions({ settings }) {
+ const [inputValue, setInputValue] = useState(settings?.JanAiApiKey);
+ const [apiKey, setApiKey] = useState(settings?.JanAiApiKey);
+
+ const {
+ basePath,
+ basePathValue,
+ showAdvancedControls,
+ setShowAdvancedControls,
+ } = useProviderEndpointAutoDiscovery({
+ provider: "janai",
+    // Default to the first common URL when no base path has been saved, so the
+    // hook does not auto-probe endpoints before an API key exists (Jan AI
+    // always requires an API key).
+ initialBasePath: settings?.JanAiBasePath || JAN_AI_COMMON_URLS[0],
+ initialAuthToken: apiKey,
+    ENDPOINTS: JAN_AI_COMMON_URLS,
+ });
+
+  return (
+    <div className="w-full flex flex-col gap-y-7">
+      <div className="w-full flex items-start gap-[36px] mt-1.5">
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-3">
+            Jan AI API Key
+          </label>
+          <input
+            type="password"
+            name="JanAiApiKey"
+            className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
+            placeholder="Jan AI API Key"
+            defaultValue={settings?.JanAiApiKey ? "*".repeat(20) : ""}
+            required={true}
+            autoComplete="off"
+            spellCheck={false}
+            onChange={(e) => setInputValue(e.target.value)}
+            onBlur={() => setApiKey(inputValue)}
+          />
+        </div>
+        {!settings?.credentialsOnly && (
+          <JanAiModelSelection
+            apiKey={apiKey}
+            settings={settings}
+            basePath={basePath.value}
+          />
+        )}
+      </div>
+      <div className="flex justify-start mt-4">
+        <button
+          onClick={(e) => {
+            e.preventDefault();
+            setShowAdvancedControls(!showAdvancedControls);
+          }}
+          className="border-none text-theme-text-primary hover:text-theme-text-secondary flex items-center text-sm"
+        >
+          {showAdvancedControls ? "Hide" : "Show"} advanced settings
+          {showAdvancedControls ? (
+            <CaretUp size={14} className="ml-1" />
+          ) : (
+            <CaretDown size={14} className="ml-1" />
+          )}
+        </button>
+      </div>
+      <div hidden={!showAdvancedControls}>
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-3">
+            Jan AI Base URL
+          </label>
+          <input
+            type="url"
+            name="JanAiBasePath"
+            className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
+            placeholder="http://127.0.0.1:1337/v1"
+            value={basePathValue.value}
+            required={true}
+            autoComplete="off"
+            spellCheck={false}
+            onChange={basePath.onChange}
+            onBlur={basePath.onBlur}
+          />
+          <p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
+            Enter the URL where Jan AI is running.
+          </p>
+        </div>
+      </div>
+    </div>
+  );
+}
+
+function JanAiModelSelection({ apiKey, settings, basePath }) {
+ const [models, setModels] = useState([]);
+ const [loading, setLoading] = useState(true);
+ const [selectedModel, setSelectedModel] = useState(
+ settings?.JanAiModelPref || ""
+ );
+
+ useEffect(() => {
+ async function findCustomModels() {
+ if (!apiKey || !basePath) {
+ setModels([]);
+ setLoading(false);
+ return;
+ }
+
+ try {
+ setLoading(true);
+ const { models } = await System.customModels("janai", apiKey, basePath);
+ setModels(models || []);
+
+ // If no model is selected and we have models, select the first one
+ if (!selectedModel && models?.length > 0) {
+ setSelectedModel(models[0].id);
+ }
+ } catch (error) {
+ console.error("Failed to fetch custom models:", error);
+ setModels([]);
+ } finally {
+ setLoading(false);
+ }
+ }
+ findCustomModels();
+ }, [apiKey, basePath]);
+
+  if (!apiKey || !basePath) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          Chat Model Selection
+        </label>
+        <select
+          name="JanAiModelPref"
+          disabled={true}
+          className="border-none bg-theme-settings-input-bg border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            -- waiting for API key and base path --
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  if (loading) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          Chat Model Selection
+        </label>
+        <select
+          name="JanAiModelPref"
+          disabled={true}
+          className="border-none bg-theme-settings-input-bg border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            -- loading available models --
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-3">
+        Chat Model Selection
+      </label>
+      <select
+        name="JanAiModelPref"
+        required={true}
+        value={selectedModel}
+        onChange={(e) => setSelectedModel(e.target.value)}
+        className="border-none bg-theme-settings-input-bg border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {models.map((model) => (
+          <option key={model.id} value={model.id}>
+            {model.id}
+          </option>
+        ))}
+      </select>
+    </div>
+  );
+}
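Reviewer note: `JanAiModelSelection` assumes `System.customModels("janai", apiKey, basePath)` resolves to an object whose `models` array holds entries with an `id` field (see `getJanAiModels` in `server/utils/helpers/customModels.js` below). A minimal sketch of the shape the dropdown consumes; the model names are placeholders:

```js
// Shape assumed by the model dropdown; not a captured server response.
const exampleResponse = {
  models: [{ id: "gemma3:1b" }, { id: "llama3.2:3b" }],
  error: null,
};

// Options are keyed and labeled by `model.id`.
for (const model of exampleResponse.models) console.log(model.id);
```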
diff --git a/frontend/src/media/llmprovider/janai.png b/frontend/src/media/llmprovider/janai.png
new file mode 100644
index 0000000000..49a5741680
Binary files /dev/null and b/frontend/src/media/llmprovider/janai.png differ
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index 9ef12944c2..bb442e5ce5 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -32,6 +32,7 @@ import XAILogo from "@/media/llmprovider/xai.png";
import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
+import JanAiLogo from "@/media/llmprovider/janai.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import PreLoader from "@/components/Preloader";
@@ -62,6 +63,7 @@ import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
+import JanAiOptions from "@/components/LLMSelection/JanAiOptions";
import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
@@ -295,6 +297,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "A unified API of AI services from leading providers",
requiredConfig: ["ApipieLLMApiKey", "ApipieLLMModelPref"],
},
+ {
+ name: "Jan AI",
+ value: "janai",
+ logo: JanAiLogo,
+    options: (settings) => <JanAiOptions settings={settings} />,
+ description: "Run Jan AI's local LLMs.",
+ requiredConfig: ["JanAiApiKey"],
+ },
{
name: "Moonshot AI",
value: "moonshotai",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index 67be19cece..d97ac50d72 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -38,6 +38,7 @@ import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import PGVectorLogo from "@/media/vectordbs/pgvector.png";
import DPAISLogo from "@/media/llmprovider/dpais.png";
+import JanAiLogo from "@/media/llmprovider/janai.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import React, { useState, useEffect } from "react";
@@ -244,6 +245,14 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: DPAISLogo,
},
+ janai: {
+ name: "Jan AI",
+ description: [
+ "Your chats stay local and are not used for training",
+ "Your prompts and document text stay on your local machine",
+ ],
+ logo: JanAiLogo,
+ },
moonshotai: {
name: "Moonshot AI",
description: [
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 4ce2745d04..b9f55346e0 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -27,6 +27,7 @@ import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
+import JanAiLogo from "@/media/llmprovider/janai.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -56,6 +57,7 @@ import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
+import JanAiOptions from "@/components/LLMSelection/JanAiOptions";
import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
@@ -265,6 +267,13 @@ const LLMS = [
options: (settings) => <XAILLMOptions settings={settings} />,
description: "Run xAI's powerful LLMs like Grok-2 and more.",
},
+ {
+ name: "Jan AI",
+ value: "janai",
+ logo: JanAiLogo,
+    options: (settings) => <JanAiOptions settings={settings} />,
+    description: "Run models from a local Jan AI server.",
+ },
{
name: "Moonshot AI",
value: "moonshotai",
diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
index 31b7327ba6..f139f3d9d8 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
@@ -31,6 +31,7 @@ const ENABLED_PROVIDERS = [
"xai",
"nvidia-nim",
"gemini",
+ "janai",
"moonshotai",
// TODO: More agent support.
// "cohere", // Has tool calling and will need to build explicit support
diff --git a/frontend/src/utils/constants.js b/frontend/src/utils/constants.js
index c6a44d2ae3..e43a313930 100644
--- a/frontend/src/utils/constants.js
+++ b/frontend/src/utils/constants.js
@@ -51,6 +51,13 @@ export const NVIDIA_NIM_COMMON_URLS = [
"http://172.17.0.1:8000/v1/version",
];
+export const JAN_AI_COMMON_URLS = [
+ "http://127.0.0.1:1337/v1",
+ "http://localhost:1337/v1",
+ "http://host.docker.internal:1337/v1",
+ "http://172.17.0.1:1337/v1",
+];
+
export function fullApiUrl() {
if (API_BASE !== "/api") return API_BASE;
return `${window.location.origin}/api`;
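Reviewer note: these URLs feed `useProviderEndpointAutoDiscovery`, which probes each candidate in order and keeps the first one that answers. A hedged sketch of that behavior; the real hook also manages auth tokens, timeouts, and UI state:

```js
// Simplified sketch of endpoint probing over JAN_AI_COMMON_URLS.
async function detectBasePath(urls, apiKey) {
  for (const url of urls) {
    try {
      const res = await fetch(`${url}/models`, {
        headers: { Authorization: `Bearer ${apiKey}` },
      });
      if (res.ok) return url; // first reachable endpoint wins
    } catch {
      // Unreachable (e.g. wrong host from inside Docker); try the next candidate.
    }
  }
  return null;
}
```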
diff --git a/server/.env.example b/server/.env.example
index df0b200826..706145cc8a 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -135,6 +135,12 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
# MOONSHOT_AI_API_KEY='your-moonshot-api-key-here'
# MOONSHOT_AI_MODEL_PREF='moonshot-v1-32k'
+# LLM_PROVIDER='janai'
+# JAN_AI_API_KEY='your-jan-ai-api-key-here'
+# JAN_AI_BASE_PATH='http://127.0.0.1:1337/v1'
+# JAN_AI_MODEL_PREF='gemma3:1b'
+# JAN_AI_MODEL_TOKEN_LIMIT=4096
+
###########################################
######## Embedding API SElECTION ##########
###########################################
diff --git a/server/endpoints/utils.js b/server/endpoints/utils.js
index 425d44f95e..c6a8a16208 100644
--- a/server/endpoints/utils.js
+++ b/server/endpoints/utils.js
@@ -142,6 +142,9 @@ function getModelTag() {
case "gemini":
model = process.env.GEMINI_LLM_MODEL_PREF;
break;
+ case "janai":
+ model = process.env.JAN_AI_MODEL_PREF;
+ break;
case "moonshotai":
model = process.env.MOONSHOT_AI_MODEL_PREF;
break;
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index 999e75029e..e9ebb7ab54 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -594,6 +594,12 @@ const SystemSettings = {
NvidiaNimLLMModelPref: process.env.NVIDIA_NIM_LLM_MODEL_PREF,
NvidiaNimLLMTokenLimit: process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT,
+ // Jan AI Keys
+ JanAiApiKey: !!process.env.JAN_AI_API_KEY,
+ JanAiBasePath: process.env.JAN_AI_BASE_PATH,
+ JanAiModelPref: process.env.JAN_AI_MODEL_PREF,
+ JanAiModelTokenLimit: process.env.JAN_AI_MODEL_TOKEN_LIMIT,
+
// PPIO API keys
PPIOApiKey: !!process.env.PPIO_API_KEY,
PPIOModelPref: process.env.PPIO_MODEL_PREF,
diff --git a/server/utils/AiProviders/janAi/index.js b/server/utils/AiProviders/janAi/index.js
new file mode 100644
index 0000000000..0dbcfc6357
--- /dev/null
+++ b/server/utils/AiProviders/janAi/index.js
@@ -0,0 +1,312 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+ LLMPerformanceMonitor,
+} = require("../../helpers/chat/LLMPerformanceMonitor");
+const {
+ writeResponseChunk,
+ clientAbortedHandler,
+} = require("../../helpers/chat/responses");
+const { formatChatHistory } = require("../../helpers/chat/responses");
+const { v4: uuidv4 } = require("uuid");
+
+class JanAiLLM {
+ constructor(embedder = null, modelPreference = null) {
+ if (!process.env.JAN_AI_API_KEY)
+ throw new Error("No Jan AI API key was set.");
+ const { OpenAI: OpenAIApi } = require("openai");
+
+ this.basePath = process.env.JAN_AI_BASE_PATH || "http://127.0.0.1:1337/v1";
+ this.openai = new OpenAIApi({
+ baseURL: this.basePath,
+ apiKey: process.env.JAN_AI_API_KEY,
+ });
+ this.model = modelPreference || process.env.JAN_AI_MODEL_PREF;
+    this.contextWindow = Number(process.env.JAN_AI_MODEL_TOKEN_LIMIT) || 4096;
+ this.limits = {
+ history: this.contextWindow * 0.15,
+ system: this.contextWindow * 0.15,
+ user: this.contextWindow * 0.7,
+ };
+
+ this.embedder = embedder ?? new NativeEmbedder();
+ this.defaultTemp = 0.7;
+    this.timeout = 500; // ms of stream silence before the stream is treated as finished
+
+    // Refresh the context window from the models list, which exposes each model's ctx_len
+ this.updateContextWindow();
+ }
+
+ async updateContextWindow() {
+ try {
+ const { data } = await this.openai.models.list();
+ const model = data.find((m) => m.id === this.model);
+ if (model?.ctx_len) {
+ this.contextWindow = model.ctx_len;
+ this.limits = {
+ history: this.contextWindow * 0.15,
+ system: this.contextWindow * 0.15,
+ user: this.contextWindow * 0.7,
+ };
+ }
+ } catch (error) {
+ this.log(`Using default context window of ${this.contextWindow}`);
+ }
+ }
+
+ log(message) {
+ console.log(`[Jan AI] ${message}`);
+ }
+
+ #appendContext(contextTexts = []) {
+ if (!contextTexts || !contextTexts.length) return "";
+ return (
+ "\nContext:\n" +
+ contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")
+ );
+ }
+
+ #generateContent({ userPrompt, attachments = [] }) {
+ if (!attachments.length) {
+ return userPrompt;
+ }
+
+ const content = [{ type: "text", text: userPrompt }];
+ for (let attachment of attachments) {
+ content.push({
+ type: "image_url",
+ image_url: {
+ url: attachment.contentString,
+ },
+ });
+ }
+ return content.flat();
+ }
+
+ streamingEnabled() {
+ return true;
+ }
+
+ async isValidChatCompletionModel(model) {
+ try {
+ const { data } = await this.openai.models.list();
+ return data.some((m) => m.id === model);
+ } catch (error) {
+ console.error("Failed to validate model:", error);
+ return false;
+ }
+ }
+
+ promptWindowLimit() {
+ return this.contextWindow;
+ }
+
+ constructPrompt({
+ systemPrompt = "",
+ contextTexts = [],
+ chatHistory = [],
+ userPrompt = "",
+ attachments = [],
+ }) {
+ const prompt = {
+ role: "system",
+ content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+ };
+ return [
+ prompt,
+ ...formatChatHistory(chatHistory, this.#generateContent),
+ {
+ role: "user",
+ content: this.#generateContent({ userPrompt, attachments }),
+ },
+ ];
+ }
+
+ async compressMessages(promptArgs = {}, rawHistory = []) {
+ const { messageArrayCompressor } = require("../../helpers/chat");
+ const messageArray = this.constructPrompt(promptArgs);
+ return await messageArrayCompressor(this, messageArray, rawHistory);
+ }
+
+ async getChatCompletion(messages = null, { temperature = 0.7 }) {
+ if (!(await this.isValidChatCompletionModel(this.model)))
+ throw new Error(
+ `Jan AI chat: ${this.model} is not valid for chat completion!`
+ );
+
+ const result = await LLMPerformanceMonitor.measureAsyncFunction(
+ this.openai.chat.completions
+ .create({
+ model: this.model,
+ messages,
+ temperature,
+ })
+ .catch((e) => {
+ throw new Error(e.message);
+ })
+ );
+
+ if (!result.output?.choices?.length) return null;
+
+ return {
+ textResponse: result.output.choices[0].message.content,
+ metrics: {
+        prompt_tokens: result.output.usage?.prompt_tokens || 0,
+        completion_tokens: result.output.usage?.completion_tokens || 0,
+        total_tokens: result.output.usage?.total_tokens || 0,
+        outputTps:
+          (result.output.usage?.completion_tokens || 0) / result.duration,
+ duration: result.duration,
+ },
+ };
+ }
+
+ async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+ if (!(await this.isValidChatCompletionModel(this.model)))
+ throw new Error(
+ `Jan AI chat: ${this.model} is not valid for chat completion!`
+ );
+
+ const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
+ this.openai.chat.completions.create({
+ model: this.model,
+ stream: true,
+ messages,
+ temperature,
+ }),
+ messages
+ );
+
+ return measuredStreamRequest;
+ }
+
+  // Custom stream handler for Jan AI.
+  // Jan AI never sends a finish_reason (it is always null), so stream completion
+  // is detected manually via a stale-chunk timeout.
+ handleStream(response, stream, responseProps) {
+ const { uuid = uuidv4(), sources = [] } = responseProps;
+ let lastChunkTime = null;
+ let usage = { completion_tokens: 0 };
+
+ return new Promise((resolve) => {
+ let fullText = "";
+ let reasoningText = "";
+
+ const handleAbort = () => {
+ stream?.endMeasurement(usage);
+ clientAbortedHandler(resolve, fullText);
+ };
+ response.on("close", handleAbort);
+
+ const timeoutCheck = setInterval(() => {
+ if (lastChunkTime === null) return;
+ const now = Number(new Date());
+ const diffMs = now - lastChunkTime;
+
+ if (diffMs >= this.timeout) {
+ this.log(
+ `Stream stale for >${this.timeout}ms. Closing response stream.`
+ );
+ writeResponseChunk(response, {
+ uuid,
+ sources,
+ type: "textResponseChunk",
+ textResponse: "",
+ close: true,
+ error: false,
+ });
+ clearInterval(timeoutCheck);
+ response.removeListener("close", handleAbort);
+ stream?.endMeasurement(usage);
+ resolve(fullText);
+ }
+ }, 100);
+
+ const processStream = async () => {
+ try {
+ for await (const chunk of stream) {
+ lastChunkTime = Number(new Date());
+ const message = chunk?.choices?.[0];
+ const token = message?.delta?.content;
+ const reasoningToken = message?.delta?.reasoning_content;
+
+            if (reasoningToken) {
+              // First reasoning chunk - open the <think> block before streaming it.
+              if (reasoningText.length === 0) {
+                writeResponseChunk(response, {
+                  uuid,
+                  sources: [],
+                  type: "textResponseChunk",
+                  textResponse: `<think>${reasoningToken}`,
+                  close: false,
+                  error: false,
+                });
+                reasoningText = `<think>${reasoningToken}`;
+              } else {
+                writeResponseChunk(response, {
+                  uuid,
+                  sources: [],
+                  type: "textResponseChunk",
+                  textResponse: reasoningToken,
+                  close: false,
+                  error: false,
+                });
+                reasoningText += reasoningToken;
+              }
+            }
+
+            // Reasoning ended and normal tokens began - close the <think> block
+            // and fold the accumulated reasoning into the full text.
+            if (!!reasoningText && !reasoningToken && token) {
+              writeResponseChunk(response, {
+                uuid,
+                sources: [],
+                type: "textResponseChunk",
+                textResponse: `</think>`,
+                close: false,
+                error: false,
+              });
+              fullText += `${reasoningText}</think>`;
+              reasoningText = "";
+            }
+
+ if (token) {
+ fullText += token;
+ usage.completion_tokens++;
+ writeResponseChunk(response, {
+ uuid,
+ sources: [],
+ type: "textResponseChunk",
+ textResponse: token,
+ close: false,
+ error: false,
+ });
+ }
+ }
+ } catch (e) {
+ clearInterval(timeoutCheck);
+ writeResponseChunk(response, {
+ uuid,
+ type: "abort",
+ textResponse: null,
+ sources: [],
+ close: true,
+ error: e.message,
+ });
+ response.removeListener("close", handleAbort);
+ stream?.endMeasurement(usage);
+ resolve(fullText);
+ }
+ };
+
+ processStream();
+ });
+ }
+
+ async embedTextInput(textInput) {
+ return await this.embedder.embedTextInput(textInput);
+ }
+ async embedChunks(textChunks = []) {
+ return await this.embedder.embedChunks(textChunks);
+ }
+}
+
+module.exports = { JanAiLLM };
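Reviewer note: a hedged sketch of exercising `JanAiLLM` directly, outside the normal `getLLMProvider` flow registered later in this PR. The env values are placeholders and a local Jan AI server is assumed to be running:

```js
// Sketch only; assumes a local Jan AI server and placeholder credentials.
process.env.JAN_AI_API_KEY = "your-jan-ai-api-key-here";
process.env.JAN_AI_MODEL_PREF = "gemma3:1b";

const { JanAiLLM } = require("./server/utils/AiProviders/janAi");

(async () => {
  const llm = new JanAiLLM();
  const messages = llm.constructPrompt({
    systemPrompt: "You are a helpful assistant.",
    contextTexts: [],
    chatHistory: [],
    userPrompt: "Say hello.",
  });
  const { textResponse, metrics } = await llm.getChatCompletion(messages, {
    temperature: 0.7,
  });
  console.log(textResponse, metrics);
})();
```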
diff --git a/server/utils/agents/aibitat/index.js b/server/utils/agents/aibitat/index.js
index d6b22d3a9a..c7457cfa32 100644
--- a/server/utils/agents/aibitat/index.js
+++ b/server/utils/agents/aibitat/index.js
@@ -830,6 +830,8 @@ ${this.getHistory({ to: route.to })
return new Providers.GeminiProvider({ model: config.model });
case "dpais":
return new Providers.DellProAiStudioProvider({ model: config.model });
+ case "janai":
+ return new Providers.JanAiProvider({ model: config.model });
default:
throw new Error(
`Unknown provider: ${config.provider}. Please use a valid provider.`
diff --git a/server/utils/agents/aibitat/providers/ai-provider.js b/server/utils/agents/aibitat/providers/ai-provider.js
index 07867e4c6e..375a324877 100644
--- a/server/utils/agents/aibitat/providers/ai-provider.js
+++ b/server/utils/agents/aibitat/providers/ai-provider.js
@@ -184,6 +184,14 @@ class Provider {
apiKey: process.env.GEMINI_API_KEY ?? null,
...config,
});
+ case "janai":
+ return new ChatOpenAI({
+ configuration: {
+            baseURL: process.env.JAN_AI_BASE_PATH ?? "http://127.0.0.1:1337/v1",
+ },
+ apiKey: process.env.JAN_AI_API_KEY ?? null,
+ ...config,
+ });
case "moonshotai":
return new ChatOpenAI({
configuration: {
diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js
index 859ad9de9d..9081a3f97c 100644
--- a/server/utils/agents/aibitat/providers/index.js
+++ b/server/utils/agents/aibitat/providers/index.js
@@ -23,6 +23,7 @@ const NvidiaNimProvider = require("./nvidiaNim.js");
const PPIOProvider = require("./ppio.js");
const GeminiProvider = require("./gemini.js");
const DellProAiStudioProvider = require("./dellProAiStudio.js");
+const JanAiProvider = require("./janAi.js");
const MoonshotAiProvider = require("./moonshotAi.js");
module.exports = {
@@ -51,5 +52,6 @@ module.exports = {
PPIOProvider,
GeminiProvider,
DellProAiStudioProvider,
+ JanAiProvider,
MoonshotAiProvider,
};
diff --git a/server/utils/agents/aibitat/providers/janAi.js b/server/utils/agents/aibitat/providers/janAi.js
new file mode 100644
index 0000000000..a13af0d28c
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/janAi.js
@@ -0,0 +1,93 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+class JanAiProvider extends InheritMultiple([Provider, UnTooled]) {
+ model;
+
+ constructor(config = {}) {
+ const { model } = config;
+ super();
+ const client = new OpenAI({
+      baseURL: process.env.JAN_AI_BASE_PATH ?? "http://127.0.0.1:1337/v1",
+ apiKey: process.env.JAN_AI_API_KEY,
+ maxRetries: 3,
+ });
+
+ this._client = client;
+ this.model = model;
+ this.verbose = true;
+ }
+
+ get client() {
+ return this._client;
+ }
+
+ async #handleFunctionCallChat({ messages = [] }) {
+ return await this.client.chat.completions
+ .create({
+ model: this.model,
+ temperature: 0,
+ messages,
+ })
+ .then((result) => {
+ if (!result.hasOwnProperty("choices"))
+ throw new Error("Jan AI chat: No results!");
+ if (result.choices.length === 0)
+ throw new Error("Jan AI chat: No results length!");
+ return result.choices[0].message.content;
+ })
+ .catch((_) => {
+ return null;
+ });
+ }
+
+ async complete(messages, functions = []) {
+ try {
+ let completion;
+ if (functions.length > 0) {
+ const { toolCall, text } = await this.functionCall(
+ messages,
+ functions,
+ this.#handleFunctionCallChat.bind(this)
+ );
+
+ if (toolCall !== null) {
+ this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+ this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+ return {
+ result: null,
+ functionCall: {
+ name: toolCall.name,
+ arguments: toolCall.arguments,
+ },
+ cost: 0,
+ };
+ }
+ completion = { content: text };
+ }
+
+ if (!completion?.content) {
+ this.providerLog(
+ "Will assume chat completion without tool call inputs."
+ );
+ const response = await this.client.chat.completions.create({
+ model: this.model,
+ messages: this.cleanMsgs(messages),
+ });
+ completion = response.choices[0].message;
+ }
+
+ this.deduplicator.reset("runs");
+ return {
+ result: completion.content,
+ cost: 0,
+ };
+ } catch (error) {
+ throw error;
+ }
+ }
+}
+
+module.exports = JanAiProvider;
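Reviewer note: aibitat invokes `complete()` with the chat history plus the agent's available function definitions. A hedged sketch of that contract; the tool schema is a made-up example, not something this PR ships:

```js
// Sketch of the agent-side contract; the tool definition is illustrative only.
const JanAiProvider = require("./server/utils/agents/aibitat/providers/janAi.js");

(async () => {
  const provider = new JanAiProvider({ model: "gemma3:1b" }); // placeholder model
  const { result, functionCall } = await provider.complete(
    [{ role: "user", content: "What's the weather in Paris?" }],
    [
      {
        name: "get_weather",
        description: "Look up the current weather for a city.",
        parameters: { city: { type: "string" } },
      },
    ]
  );
  // Returns either plain text in `result`, or a `functionCall` for the loop to run.
  console.log(result ?? functionCall);
})();
```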
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index 4527ee783b..46c7f7ae05 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -199,6 +199,10 @@ class AgentHandler {
"Dell Pro AI Studio model must be set to use agents."
);
break;
+ case "janai":
+ if (!process.env.JAN_AI_MODEL_PREF)
+ throw new Error("Jan AI model must be set to use agents.");
+ break;
case "moonshotai":
if (!process.env.MOONSHOT_AI_MODEL_PREF)
throw new Error("Moonshot AI model must be set to use agents.");
@@ -274,6 +278,8 @@ class AgentHandler {
return process.env.GEMINI_LLM_MODEL_PREF ?? "gemini-2.0-flash-lite";
case "dpais":
return process.env.DPAIS_LLM_MODEL_PREF;
+ case "janai":
+ return process.env.JAN_AI_MODEL_PREF;
default:
return null;
}
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index e0a1fb820e..eeb2e17ddc 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -32,6 +32,7 @@ const SUPPORT_CUSTOM_MODELS = [
"gemini",
"ppio",
"dpais",
+ "janai",
"moonshotai",
// Embedding Engines
"native-embedder",
@@ -86,6 +87,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await getPPIOModels(apiKey);
case "dpais":
return await getDellProAiStudioModels(basePath);
+ case "janai":
+ return await getJanAiModels(apiKey, basePath);
case "moonshotai":
return await getMoonshotAiModels(apiKey);
case "native-embedder":
@@ -681,6 +684,33 @@ async function getDellProAiStudioModels(basePath = null) {
}
}
+async function getJanAiModels(_apiKey = null, basePath = null) {
+ const apiKey =
+ _apiKey === true
+ ? process.env.JAN_AI_API_KEY
+ : _apiKey || process.env.JAN_AI_API_KEY || null;
+
+ const { OpenAI: OpenAIApi } = require("openai");
+ const openai = new OpenAIApi({
+ baseURL: basePath || process.env.JAN_AI_BASE_PATH,
+ apiKey,
+ });
+ const models = await openai.models
+ .list()
+ .then((results) => results.data)
+ .catch((e) => {
+ console.error(`JanAi:listModels`, e.message);
+ return [];
+ });
+
+  // The API key worked, so persist it (and the base path) for future use.
+ if (models.length > 0 && !!apiKey) {
+ process.env.JAN_AI_API_KEY = apiKey;
+ if (basePath) process.env.JAN_AI_BASE_PATH = basePath;
+ }
+ return { models, error: null };
+}
+
function getNativeEmbedderModels() {
const { NativeEmbedder } = require("../EmbeddingEngines/native");
return { models: NativeEmbedder.availableModels(), error: null };
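Reviewer note: the frontend's `System.customModels` call ultimately lands in `getCustomModels`, so the new branch can be exercised server-side as in this hedged sketch. Values are illustrative; passing `true` as the key reuses the stored `JAN_AI_API_KEY`:

```js
// Sketch: server-side model listing for Jan AI.
const { getCustomModels } = require("./server/utils/helpers/customModels");

(async () => {
  const { models, error } = await getCustomModels(
    "janai",
    "your-jan-ai-api-key-here", // or `true` to reuse process.env.JAN_AI_API_KEY
    "http://127.0.0.1:1337/v1"
  );
  if (error) return console.error(error);
  console.log(models.map((m) => m.id));
})();
```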
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 9e101a2be3..85899e81cc 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -209,6 +209,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "dpais":
const { DellProAiStudioLLM } = require("../AiProviders/dellProAiStudio");
return new DellProAiStudioLLM(embedder, model);
+ case "janai":
+ const { JanAiLLM } = require("../AiProviders/janAi");
+ return new JanAiLLM(embedder, model);
default:
throw new Error(
`ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@@ -356,6 +359,9 @@ function getLLMProviderClass({ provider = null } = {}) {
case "dpais":
const { DellProAiStudioLLM } = require("../AiProviders/dellProAiStudio");
return DellProAiStudioLLM;
+ case "janai":
+ const { JanAiLLM } = require("../AiProviders/janAi");
+ return JanAiLLM;
case "moonshotai":
const { MoonshotAiLLM } = require("../AiProviders/moonshotAi");
return MoonshotAiLLM;
@@ -425,6 +431,8 @@ function getBaseLLMProviderModel({ provider = null } = {}) {
return process.env.PPIO_API_KEY;
case "dpais":
return process.env.DPAIS_LLM_MODEL_PREF;
+ case "janai":
+ return process.env.JAN_AI_MODEL_PREF;
case "moonshotai":
return process.env.MOONSHOT_AI_MODEL_PREF;
default:
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index d92cd36df9..8cfef15746 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -677,6 +677,23 @@ const KEY_MAPPING = {
checks: [isNotEmpty],
},
+ // Jan AI Options
+ JanAiApiKey: {
+ envKey: "JAN_AI_API_KEY",
+ checks: [isNotEmpty],
+ },
+ JanAiModelPref: {
+ envKey: "JAN_AI_MODEL_PREF",
+ checks: [isNotEmpty],
+ },
+ JanAiBasePath: {
+ envKey: "JAN_AI_BASE_PATH",
+ checks: [isValidURL],
+ },
+  JanAiModelTokenLimit: {
+    envKey: "JAN_AI_MODEL_TOKEN_LIMIT",
+    checks: [nonZero],
+  },
+
// Moonshot AI Options
MoonshotAiApiKey: {
envKey: "MOONSHOT_AI_API_KEY",
@@ -794,6 +811,7 @@ function supportedLLM(input = "") {
"nvidia-nim",
"ppio",
"dpais",
+ "janai",
"moonshotai",
].includes(input);
return validSelection ? null : `${input} is not a valid LLM provider.`;
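Reviewer note on how the new `KEY_MAPPING` entries are exercised: each incoming setting runs through its `checks` before the env var is written, and any check returning a message blocks the update. A simplified sketch; the real `updateENV` also handles post-update hooks and persistence:

```js
// Simplified validation flow for the new Jan AI keys.
function validateSetting(key, value, KEY_MAPPING) {
  const { envKey, checks } = KEY_MAPPING[key];
  // Each check returns null when the value passes, or an error message string.
  const error = checks.map((check) => check(value)).find(Boolean) ?? null;
  return { envKey, error }; // error === null means safe to write process.env[envKey]
}

// e.g. validateSetting("JanAiBasePath", "http://127.0.0.1:1337/v1", KEY_MAPPING)
//      -> { envKey: "JAN_AI_BASE_PATH", error: null }
```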