diff --git a/docker/.env.example b/docker/.env.example
index 5ea5ce609da..3f99e52d436 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -74,6 +74,7 @@ GID='1000'
# LLM_PROVIDER='textgenwebui'
# TEXT_GEN_WEB_UI_BASE_PATH='http://127.0.0.1:5000/v1'
# TEXT_GEN_WEB_UI_TOKEN_LIMIT=4096
+# TEXT_GEN_WEB_UI_API_KEY='sk-123abc'
# LLM_PROVIDER='generic-openai'
# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
diff --git a/frontend/src/components/LLMSelection/TextGenWebUIOptions/index.jsx b/frontend/src/components/LLMSelection/TextGenWebUIOptions/index.jsx
index ec29222d244..50bb841545b 100644
--- a/frontend/src/components/LLMSelection/TextGenWebUIOptions/index.jsx
+++ b/frontend/src/components/LLMSelection/TextGenWebUIOptions/index.jsx
@@ -32,6 +32,20 @@ export default function TextGenWebUIOptions({ settings }) {
autoComplete="off"
/>
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          API Key (Optional)
+        </label>
+        <input
+          type="password"
+          name="TextGenWebUIAPIKey"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="TextGen Web UI API Key"
+          defaultValue={settings?.TextGenWebUIAPIKey ? "*".repeat(20) : ""}
+          autoComplete="off"
+          spellCheck={false}
+        />
+      </div>
);
}
diff --git a/server/.env.example b/server/.env.example
index 008d90d2fc7..8f424d82a6e 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -71,6 +71,7 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
# LLM_PROVIDER='textgenwebui'
# TEXT_GEN_WEB_UI_BASE_PATH='http://127.0.0.1:5000/v1'
# TEXT_GEN_WEB_UI_TOKEN_LIMIT=4096
+# TEXT_GEN_WEB_UI_API_KEY='sk-123abc'
# LLM_PROVIDER='generic-openai'
# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index 21d7af21797..3c23a7d3925 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -367,6 +367,7 @@ const SystemSettings = {
// Text Generation Web UI Keys
TextGenWebUIBasePath: process.env.TEXT_GEN_WEB_UI_BASE_PATH,
TextGenWebUITokenLimit: process.env.TEXT_GEN_WEB_UI_MODEL_TOKEN_LIMIT,
+ TextGenWebUIAPIKey: !!process.env.TEXT_GEN_WEB_UI_API_KEY,
// Generic OpenAI Keys
GenericOpenAiBasePath: process.env.GENERIC_OPEN_AI_BASE_PATH,
diff --git a/server/utils/AiProviders/textGenWebUI/index.js b/server/utils/AiProviders/textGenWebUI/index.js
index ae0282a30fc..dfce76a5a5e 100644
--- a/server/utils/AiProviders/textGenWebUI/index.js
+++ b/server/utils/AiProviders/textGenWebUI/index.js
@@ -14,7 +14,7 @@ class TextGenWebUILLM {
this.basePath = process.env.TEXT_GEN_WEB_UI_BASE_PATH;
this.openai = new OpenAIApi({
baseURL: this.basePath,
- apiKey: null,
+ apiKey: process.env.TEXT_GEN_WEB_UI_API_KEY ?? null,
});
this.model = null;
this.limits = {
diff --git a/server/utils/agents/aibitat/providers/textgenwebui.js b/server/utils/agents/aibitat/providers/textgenwebui.js
index 767577d423c..d1e424255a4 100644
--- a/server/utils/agents/aibitat/providers/textgenwebui.js
+++ b/server/utils/agents/aibitat/providers/textgenwebui.js
@@ -13,7 +13,7 @@ class TextWebGenUiProvider extends InheritMultiple([Provider, UnTooled]) {
super();
const client = new OpenAI({
baseURL: process.env.TEXT_GEN_WEB_UI_BASE_PATH,
- apiKey: null,
+ apiKey: process.env.TEXT_GEN_WEB_UI_API_KEY ?? null,
maxRetries: 3,
});
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index e2f5c7526e9..947fbc62492 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -155,6 +155,10 @@ const KEY_MAPPING = {
envKey: "TEXT_GEN_WEB_UI_MODEL_TOKEN_LIMIT",
checks: [nonZero],
},
+ TextGenWebUIAPIKey: {
+ envKey: "TEXT_GEN_WEB_UI_API_KEY",
+ checks: [],
+ },
// Generic OpenAI InferenceSettings
GenericOpenAiBasePath: {