diff --git a/docker/.env.example b/docker/.env.example index 5f1b0e44c4e..d3cc68e1240 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -42,6 +42,7 @@ GID='1000' # OLLAMA_BASE_PATH='http://host.docker.internal:11434' # OLLAMA_MODEL_PREF='llama2' # OLLAMA_MODEL_TOKEN_LIMIT=4096 +# OLLAMA_AUTH_TOKEN='your-ollama-auth-token-here (optional; only needed if Ollama runs behind authentication - sent as a Bearer token)' # LLM_PROVIDER='togetherai' # TOGETHER_AI_API_KEY='my-together-ai-key' diff --git a/frontend/src/components/LLMSelection/OllamaLLMOptions/index.jsx b/frontend/src/components/LLMSelection/OllamaLLMOptions/index.jsx index d04f7cb622a..c98877c5e29 100644 --- a/frontend/src/components/LLMSelection/OllamaLLMOptions/index.jsx +++ b/frontend/src/components/LLMSelection/OllamaLLMOptions/index.jsx @@ -11,12 +11,15 @@ export default function OllamaLLMOptions({ settings }) { autoDetecting: loading, basePath, basePathValue, + authToken, + authTokenValue, showAdvancedControls, setShowAdvancedControls, handleAutoDetectClick, } = useProviderEndpointAutoDiscovery({ provider: "ollama", initialBasePath: settings?.OllamaLLMBasePath, + initialAuthToken: settings?.OllamaLLMAuthToken, ENDPOINTS: OLLAMA_COMMON_URLS, }); const [performanceMode, setPerformanceMode] = useState( @@ -32,6 +35,7 @@ export default function OllamaLLMOptions({ settings }) {