From a48119f22eb85d89f91920ff41501cace2b0d985 Mon Sep 17 00:00:00 2001
From: naaa760
Date: Tue, 14 Oct 2025 02:56:31 +0530
Subject: [PATCH 1/9] feat: add n1n LLM provider with OpenAI-compatible API
 integration

---
 server/utils/AiProviders/n1n/index.js | 244 ++++++++++++++++++++++++++
 1 file changed, 244 insertions(+)
 create mode 100644 server/utils/AiProviders/n1n/index.js

diff --git a/server/utils/AiProviders/n1n/index.js b/server/utils/AiProviders/n1n/index.js
new file mode 100644
index 00000000000..d4efbc408ef
--- /dev/null
+++ b/server/utils/AiProviders/n1n/index.js
@@ -0,0 +1,244 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+  LLMPerformanceMonitor,
+} = require("../../helpers/chat/LLMPerformanceMonitor");
+const { v4: uuidv4 } = require("uuid");
+const {
+  writeResponseChunk,
+  clientAbortedHandler,
+} = require("../../helpers/chat/responses");
+
+class N1nLLM {
+  constructor(embedder = null, modelPreference = null) {
+    if (!process.env.N1N_API_KEY)
+      throw new Error("No n1n API key was set.");
+
+    this.className = "N1nLLM";
+    const { OpenAI: OpenAIApi } = require("openai");
+    this.basePath = "https://api.n1n.ai/v1";
+    this.openai = new OpenAIApi({
+      baseURL: this.basePath,
+      apiKey: process.env.N1N_API_KEY ?? null,
+      defaultHeaders: {
+        "HTTP-Referer": "https://anythingllm.com",
+        "X-Title": "AnythingLLM",
+      },
+    });
+    this.model = modelPreference || process.env.N1N_MODEL_PREF || "gpt-4o";
+    this.limits = {
+      history: this.promptWindowLimit() * 0.15,
+      system: this.promptWindowLimit() * 0.15,
+      user: this.promptWindowLimit() * 0.7,
+    };
+
+    this.embedder = embedder ?? new NativeEmbedder();
+    this.defaultTemp = 0.7;
+    this.log(`Initialized with model: ${this.model}`);
+  }
+
+  log(text, ...args) {
+    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
+  streamingEnabled() {
+    return "streamGetChatCompletion" in this;
+  }
+
+  static promptWindowLimit(_modelName) {
+    return 128000;
+  }
+
+  promptWindowLimit() {
+    return 128000;
+  }
+
+  async isValidChatCompletionModel(modelName = "") {
+    const models = await this.openai.models
+      .list()
+      .catch(() => ({ data: [] }));
+    return models.data.some((model) => model.id === modelName);
+  }
+
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+  }) {
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+    };
+    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+  }
+
+  async getChatCompletion(messages = null, { temperature = 0.7 }) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `n1n chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const result = await LLMPerformanceMonitor.measureAsyncFunction(
+      this.openai.chat.completions
+        .create({
+          model: this.model,
+          messages,
+          temperature,
+        })
+        .catch((e) => {
+          throw new Error(e.message);
+        })
+    );
+
+    if (
+      !result?.output?.hasOwnProperty("choices") ||
+      result?.output?.choices?.length === 0
+    )
+      throw new Error(
+        `Invalid response from n1n: ${result.output?.error?.message || "Unknown error"}`
+      );
+
+    return {
+      textResponse: result.output.choices[0].message.content,
+      metrics: {
+        prompt_tokens: result.output.usage?.prompt_tokens || 0,
+        completion_tokens: result.output.usage?.completion_tokens || 0,
+        total_tokens: result.output.usage?.total_tokens || 0,
+        outputTps:
+          (result.output.usage?.completion_tokens || 0) / result.duration,
+        duration: result.duration,
+      },
+    };
+  }
+
+  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `n1n chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
+      this.openai.chat.completions.create({
+        model: this.model,
+        stream: true,
+        messages,
+        temperature,
+      }),
+      messages,
+      false
+    );
+
+    return measuredStreamRequest;
+  }
+
+  handleStream(response, stream, responseProps) {
+    const { uuid = uuidv4(), sources = [] } = responseProps;
+    let usage = {
+      completion_tokens: 0,
+    };
+
+    return new Promise(async (resolve) => {
+      let fullText = "";
+
+      const handleAbort = () => {
+        stream?.endMeasurement(usage);
+        clientAbortedHandler(resolve, fullText);
+      };
+      response.on("close", handleAbort);
+
+      try {
+        for await (const chunk of stream) {
+          const message = chunk?.choices?.[0];
+          const token = message?.delta?.content;
+
+          if (
+            chunk.hasOwnProperty("usage") &&
+            !!chunk.usage &&
+            Object.values(chunk.usage).length > 0
+          ) {
+            if (chunk.usage.hasOwnProperty("prompt_tokens")) {
+              usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
+            }
+            if (chunk.usage.hasOwnProperty("completion_tokens")) {
+              usage.completion_tokens = Number(chunk.usage.completion_tokens);
+            }
+          }
+
+          if (token) {
+            fullText += token;
+            writeResponseChunk(response, {
+              uuid,
+              sources: [],
+              type: "textResponseChunk",
+              textResponse: token,
+              close: false,
+              error: false,
+            });
+          }
+
+          if (
+            message?.hasOwnProperty("finish_reason") &&
+            message.finish_reason !== "" &&
+            message.finish_reason !== null
+          ) {
+            writeResponseChunk(response, {
+              uuid,
+              sources,
+              type: "textResponseChunk",
+              textResponse: "",
+              close: true,
+              error: false,
+            });
+            response.removeListener("close", handleAbort);
+            stream?.endMeasurement(usage);
+            resolve(fullText);
+            break;
+          }
+        }
+      } catch (e) {
+        console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
+        writeResponseChunk(response, {
+          uuid,
+          type: "abort",
+          textResponse: null,
+          sources: [],
+          close: true,
+          error: e.message,
+        });
+        stream?.endMeasurement(usage);
+        resolve(fullText);
+      }
+    });
+  }
+
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
+module.exports = {
+  N1nLLM,
+};
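Note: the class above follows AnythingLLM's standard provider interface, so a
minimal usage sketch looks like the following (the prompt values and the
repo-root-relative require path are illustrative, not part of the patch):

    // Assumes N1N_API_KEY is set; run inside an async context.
    const { N1nLLM } = require("./server/utils/AiProviders/n1n");

    const llm = new N1nLLM(null, "gpt-4o"); // null embedder falls back to NativeEmbedder
    const messages = llm.constructPrompt({
      systemPrompt: "You are a helpful assistant.",
      contextTexts: [],
      chatHistory: [],
      userPrompt: "Hello!",
    });
    // Returns { textResponse, metrics } per the method above.
    const { textResponse, metrics } = await llm.getChatCompletion(messages, {
      temperature: llm.defaultTemp,
    });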
From d4fd46c065e4691bf4c9dbfa3f03c7e2df4b7e19 Mon Sep 17 00:00:00 2001
From: naaa760
Date: Tue, 14 Oct 2025 02:57:04 +0530
Subject: [PATCH 2/9] feat: add n1n agent provider for aibitat workflows

---
 server/utils/agents/aibitat/providers/n1n.js | 87 ++++++++++++++++++++
 1 file changed, 87 insertions(+)
 create mode 100644 server/utils/agents/aibitat/providers/n1n.js

diff --git a/server/utils/agents/aibitat/providers/n1n.js b/server/utils/agents/aibitat/providers/n1n.js
new file mode 100644
index 00000000000..56f2652dd88
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/n1n.js
@@ -0,0 +1,87 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+const { toValidNumber } = require("../../../http/index.js");
+
+class N1nProvider extends InheritMultiple([Provider, UnTooled]) {
+  model;
+
+  constructor(config = {}) {
+    super();
+    const { model = "gpt-4o" } = config;
+    const client = new OpenAI({
+      baseURL: "https://api.n1n.ai/v1",
+      apiKey: process.env.N1N_API_KEY ?? null,
+      maxRetries: 3,
+    });
+
+    this._client = client;
+    this.model = model;
+    this.verbose = true;
+    this.maxTokens = process.env.N1N_MAX_TOKENS
+      ? toValidNumber(process.env.N1N_MAX_TOKENS, 1024)
+      : 1024;
+  }
+
+  get client() {
+    return this._client;
+  }
+
+  get supportsAgentStreaming() {
+    return true;
+  }
+
+  async #handleFunctionCallChat({ messages = [] }) {
+    return await this.client.chat.completions
+      .create({
+        model: this.model,
+        messages,
+        max_tokens: this.maxTokens,
+      })
+      .then((result) => {
+        if (!result.hasOwnProperty("choices"))
+          throw new Error("n1n chat: No results!");
+        if (result.choices.length === 0)
+          throw new Error("n1n chat: No results length!");
+        return result.choices[0].message.content;
+      })
+      .catch((_) => {
+        return null;
+      });
+  }
+
+  async #handleFunctionCallStream({ messages = [] }) {
+    return await this.client.chat.completions.create({
+      model: this.model,
+      stream: true,
+      messages,
+    });
+  }
+
+  async stream(messages, functions = [], eventHandler = null) {
+    return await UnTooled.prototype.stream.call(
+      this,
+      messages,
+      functions,
+      this.#handleFunctionCallStream.bind(this),
+      eventHandler
+    );
+  }
+
+  async complete(messages, functions = []) {
+    return await UnTooled.prototype.complete.call(
+      this,
+      messages,
+      functions,
+      this.#handleFunctionCallChat.bind(this)
+    );
+  }
+
+  getCost(_usage) {
+    return 0;
+  }
+}
+
+module.exports = N1nProvider;
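Note: a rough sketch of the agent-side call path this class enables. The
aibitat wiring lands in patch 7; the message array here is illustrative:

    // Assumes N1N_API_KEY (and optionally N1N_MAX_TOKENS) are set.
    const N1nProvider = require("./server/utils/agents/aibitat/providers/n1n.js");

    const provider = new N1nProvider({ model: "gpt-4o" });
    // UnTooled providers emulate function-calling in-prompt, so the second
    // argument may be empty or a list of tool definitions.
    const completion = await provider.complete(
      [{ role: "user", content: "Summarize the last meeting." }],
      []
    );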
From adabfae07f012a8b34107b6ddae9c487ec629333 Mon Sep 17 00:00:00 2001
From: naaa760
Date: Tue, 14 Oct 2025 02:57:26 +0530
Subject: [PATCH 3/9] feat: add n1n options component for API key and model
 selection

---
 .../LLMSelection/N1nOptions/index.jsx | 99 +++++++++++++++++++
 1 file changed, 99 insertions(+)
 create mode 100644 frontend/src/components/LLMSelection/N1nOptions/index.jsx

diff --git a/frontend/src/components/LLMSelection/N1nOptions/index.jsx b/frontend/src/components/LLMSelection/N1nOptions/index.jsx
new file mode 100644
index 00000000000..4ac01259d5f
--- /dev/null
+++ b/frontend/src/components/LLMSelection/N1nOptions/index.jsx
@@ -0,0 +1,99 @@
+import { useState, useEffect } from "react";
+import System from "@/models/system";
+
+export default function N1nOptions({ settings }) {
+  const [inputValue, setInputValue] = useState(settings?.N1nApiKey);
+  const [n1nApiKey, setN1nApiKey] = useState(settings?.N1nApiKey);
+
+  return (
+    <div className="flex gap-[36px] mt-1.5">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          n1n API Key
+        </label>
+        <input
+          type="password"
+          name="N1nApiKey"
+          className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
+          placeholder="n1n API Key"
+          defaultValue={settings?.N1nApiKey ? "*".repeat(20) : ""}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+          onChange={(e) => setInputValue(e.target.value)}
+          onBlur={() => setN1nApiKey(inputValue)}
+        />
+      </div>
+      {!settings?.credentialsOnly && (
+        <N1nModelSelection settings={settings} apiKey={n1nApiKey} />
+      )}
+    </div>
+  );
+}
+
+function N1nModelSelection({ apiKey, settings }) {
+  const [models, setModels] = useState([]);
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    async function findCustomModels() {
+      if (!apiKey) {
+        setModels([]);
+        setLoading(true);
+        return;
+      }
+
+      setLoading(true);
+      const { models } = await System.customModels(
+        "n1n",
+        typeof apiKey === "boolean" ? null : apiKey
+      );
+      setModels(models || []);
+      setLoading(false);
+    }
+    findCustomModels();
+  }, [apiKey]);
+
+  if (loading) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          Chat Model Selection
+        </label>
+        <select
+          name="N1nModelPref"
+          disabled={true}
+          className="border-none bg-theme-settings-input-bg border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            {!!apiKey ? "--loading available models--" : "Enter n1n API key first"}
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-3">
+        Chat Model Selection
+      </label>
+      <select
+        name="N1nModelPref"
+        required={true}
+        className="border-none bg-theme-settings-input-bg border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {models.map((model) => (
+          <option
+            key={model.id}
+            value={model.id}
+            selected={settings?.N1nModelPref === model.id}
+          >
+            {model.id}
+          </option>
+        ))}
+      </select>
+    </div>
+  );
+}
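Note: the component above fetches models via System.customModels("n1n", ...),
which presumes a matching "n1n" branch in the server's custom-models helper.
That branch is not part of this patch series; a sketch of what it could look
like, assuming n1n's /v1/models endpoint follows the OpenAI response shape
(the function name and file location are hypothetical):

    // Hypothetical addition to server/utils/helpers/customModels.js
    async function getN1nModels(apiKey = null) {
      const { OpenAI: OpenAIApi } = require("openai");
      const openai = new OpenAIApi({
        baseURL: "https://api.n1n.ai/v1",
        apiKey: apiKey || process.env.N1N_API_KEY || null,
      });
      // Map the OpenAI-style model list to the { id, name } shape the
      // frontend select expects; fall back to an empty list on error.
      const models = await openai.models
        .list()
        .then((results) => results.data.map((m) => ({ id: m.id, name: m.id })))
        .catch(() => []);
      return { models, error: null };
    }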
From deff21b64c065e4691bf4c9dbfa3f03c7e2df4b7e19 Mon Sep 17 00:00:00 2001
From: naaa760
Date: Tue, 14 Oct 2025 02:57:51 +0530
Subject: [PATCH 4/9] feat: add n1n logo for provider branding

---
 frontend/src/media/llmprovider/n1n.png | Bin 0 -> 4092 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 frontend/src/media/llmprovider/n1n.png

diff --git a/frontend/src/media/llmprovider/n1n.png b/frontend/src/media/llmprovider/n1n.png
new file mode 100644
index 0000000000000000000000000000000000000000..c9611219917490a181769ec1ba2558beaf380eee
GIT binary patch
literal 4092
[4092-byte PNG binary payload omitted]

literal 0
HcmV?d00001
From 434db81e7264431bf7e730931c6a1901ec8d3d97 Mon Sep 17 00:00:00 2001
From: naaa760
Date: Tue, 14 Oct 2025 03:00:26 +0530
Subject: [PATCH 5/9] feat: register n1n provider in getLLMProvider and
 getLLMProviderClass

---
 server/utils/helpers/index.js | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 819a464c6d0..0a8c3c2b532 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -218,6 +218,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
     case "foundry":
       const { FoundryLLM } = require("../AiProviders/foundry");
       return new FoundryLLM(embedder, model);
+    case "n1n":
+      const { N1nLLM } = require("../AiProviders/n1n");
+      return new N1nLLM(embedder, model);
     default:
       throw new Error(
         `ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
       );
@@ -374,6 +377,9 @@ function getLLMProviderClass({ provider = null } = {}) {
     case "foundry":
       const { FoundryLLM } = require("../AiProviders/foundry");
       return FoundryLLM;
+    case "n1n":
+      const { N1nLLM } = require("../AiProviders/n1n");
+      return N1nLLM;
     default:
       return null;
   }

From 7550245910b6b3d252686a08d3e0f4b29310f7f9 Mon Sep 17 00:00:00 2001
From: naaa760
Date: Tue, 14 Oct 2025 03:01:07 +0530
Subject: [PATCH 6/9] feat: export N1nProvider for agent system

---
 server/utils/agents/aibitat/providers/index.js | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js
index 8cf2e7422b3..0bb46dd9053 100644
--- a/server/utils/agents/aibitat/providers/index.js
+++ b/server/utils/agents/aibitat/providers/index.js
@@ -26,6 +26,7 @@ const DellProAiStudioProvider = require("./dellProAiStudio.js");
 const MoonshotAiProvider = require("./moonshotAi.js");
 const CometApiProvider = require("./cometapi.js");
 const FoundryProvider = require("./foundry.js");
+const N1nProvider = require("./n1n.js");
 
 module.exports = {
   OpenAIProvider,
@@ -56,4 +57,5 @@ module.exports = {
   DellProAiStudioProvider,
   MoonshotAiProvider,
   FoundryProvider,
+  N1nProvider,
 };

From 44b71aaeaec43ba9dedf4e69acc04419ef875bfe Mon Sep 17 00:00:00 2001
From: naaa760
Date: Tue, 14 Oct 2025 03:01:32 +0530
Subject: [PATCH 7/9] feat: add n1n case to getProviderForConfig in aibitat

---
 server/utils/agents/aibitat/index.js | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/server/utils/agents/aibitat/index.js b/server/utils/agents/aibitat/index.js
index 65b5a146dda..6005ad7cfe5 100644
--- a/server/utils/agents/aibitat/index.js
+++ b/server/utils/agents/aibitat/index.js
@@ -974,6 +974,8 @@ ${this.getHistory({ to: route.to })
       case "cometapi":
         return new Providers.CometApiProvider({ model: config.model });
       case "foundry":
         return new Providers.FoundryProvider({ model: config.model });
+      case "n1n":
+        return new Providers.N1nProvider({ model: config.model });
       default:
         throw new Error(
           `Unknown provider: ${config.provider}. Please use a valid provider.`
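Note: with patches 5 through 7 applied, selecting n1n routes both workspace
chat and agent sessions through the new classes. A small sketch of that
resolution path (the repo-root-relative require path is illustrative):

    const { getLLMProvider, getLLMProviderClass } = require("./server/utils/helpers");

    // Returns an N1nLLM instance; the model falls back to N1N_MODEL_PREF,
    // then "gpt-4o", per the constructor in patch 1.
    const llm = getLLMProvider({ provider: "n1n" });
    // Returns the N1nLLM class itself (or null for unknown providers).
    const LLMClass = getLLMProviderClass({ provider: "n1n" });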
From 89954154900e5a1fbcca84f40d7dad98abe83aeb Mon Sep 17 00:00:00 2001
From: naaa760
Date: Tue, 14 Oct 2025 03:02:00 +0530
Subject: [PATCH 8/9] feat: add n1n to onboarding LLM provider selection

---
 .../pages/OnboardingFlow/Steps/LLMPreference/index.jsx | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 7a16985fe11..9df78583f48 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -29,6 +29,7 @@ import PPIOLogo from "@/media/llmprovider/ppio.png";
 import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
 import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
 import CometApiLogo from "@/media/llmprovider/cometapi.png";
+import N1nLogo from "@/media/llmprovider/n1n.png";
 
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@@ -59,6 +60,7 @@ import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
 import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
 import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
 import CometApiLLMOptions from "@/components/LLMSelection/CometApiLLMOptions";
+import N1nOptions from "@/components/LLMSelection/N1nOptions";
 
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import System from "@/models/system";
@@ -281,6 +283,13 @@ const LLMS = [
     options: (settings) => <CometApiLLMOptions settings={settings} />,
     description: "500+ AI Models all in one API.",
   },
+  {
+    name: "n1n",
+    value: "n1n",
+    logo: N1nLogo,
+    options: (settings) => <N1nOptions settings={settings} />,
+    description: "Access 400+ LLMs and multimodal models through n1n API.",
+  },
 ];
 
 export default function LLMPreference({
From 191b1e6b7ff2806f2db77065cded177c2a9a06b7 Mon Sep 17 00:00:00 2001
From: naaa760
Date: Tue, 14 Oct 2025 03:02:49 +0530
Subject: [PATCH 9/9] feat: add n1n to general settings LLM providers

---
 .../src/pages/GeneralSettings/LLMPreference/index.jsx | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index 671f7e867da..88b55d951d0 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -35,6 +35,7 @@ import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
 import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
 import CometApiLogo from "@/media/llmprovider/cometapi.png";
 import FoundryLogo from "@/media/llmprovider/foundry-local.png";
+import N1nLogo from "@/media/llmprovider/n1n.png";
 import PreLoader from "@/components/Preloader";
 
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -67,6 +68,7 @@ import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
 import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
 import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
 import FoundryOptions from "@/components/LLMSelection/FoundryOptions";
+import N1nOptions from "@/components/LLMSelection/N1nOptions";
 
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -349,6 +351,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
       "GenericOpenAiKey",
     ],
   },
+  {
+    name: "n1n",
+    value: "n1n",
+    logo: N1nLogo,
+    options: (settings) => <N1nOptions settings={settings} />,
+    description: "Access 400+ LLMs and multimodal models through n1n API.",
+    requiredConfig: ["N1nApiKey"],
+  },
 ];
 
 export default function GeneralLLMPreference() {
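Note: taken together, the series reads three environment variables, all
referenced in the patches above. A sample .env fragment (values are
placeholders):

    LLM_PROVIDER=n1n
    N1N_API_KEY=sk-...          # required; the N1nLLM constructor throws without it
    N1N_MODEL_PREF=gpt-4o       # optional; default chat model (falls back to gpt-4o)
    N1N_MAX_TOKENS=1024         # optional; agent provider max_tokens cap (default 1024)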