diff --git a/server/utils/AiProviders/anthropic/index.js b/server/utils/AiProviders/anthropic/index.js
index d5ee1f9d3d3..215fa5fbb86 100644
--- a/server/utils/AiProviders/anthropic/index.js
+++ b/server/utils/AiProviders/anthropic/index.js
@@ -3,6 +3,7 @@ const {
   writeResponseChunk,
   clientAbortedHandler,
 } = require("../../helpers/chat/responses");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");

 class AnthropicLLM {
   constructor(embedder = null, modelPreference = null) {
@@ -23,11 +24,7 @@ class AnthropicLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    if (!embedder)
-      throw new Error(
-        "INVALID ANTHROPIC SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Anthropic as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

diff --git a/server/utils/AiProviders/azureOpenAi/index.js b/server/utils/AiProviders/azureOpenAi/index.js
index a2ab556dbdb..3678567e3f3 100644
--- a/server/utils/AiProviders/azureOpenAi/index.js
+++ b/server/utils/AiProviders/azureOpenAi/index.js
@@ -1,4 +1,4 @@
-const { AzureOpenAiEmbedder } = require("../../EmbeddingEngines/azureOpenAi");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   writeResponseChunk,
   clientAbortedHandler,
@@ -23,11 +23,7 @@ class AzureOpenAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for AzureOpenAiLLM - falling back to AzureOpenAiEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new AzureOpenAiEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

diff --git a/server/utils/AiProviders/cohere/index.js b/server/utils/AiProviders/cohere/index.js
index a97a15fcadf..6357d503e5a 100644
--- a/server/utils/AiProviders/cohere/index.js
+++ b/server/utils/AiProviders/cohere/index.js
@@ -19,7 +19,8 @@ class CohereLLM {
       system: this.promptWindowLimit() * 0.15,
       user: this.promptWindowLimit() * 0.7,
     };
-    this.embedder = !!embedder ? embedder : new NativeEmbedder();
+
+    this.embedder = embedder ?? new NativeEmbedder();
   }

   #appendContext(contextTexts = []) {
diff --git a/server/utils/AiProviders/gemini/index.js b/server/utils/AiProviders/gemini/index.js
index 962b120136e..3dd307aff20 100644
--- a/server/utils/AiProviders/gemini/index.js
+++ b/server/utils/AiProviders/gemini/index.js
@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   writeResponseChunk,
   clientAbortedHandler,
@@ -26,11 +27,7 @@ class GeminiLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    if (!embedder)
-      throw new Error(
-        "INVALID GEMINI LLM SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Gemini as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7; // not used for Gemini
   }

diff --git a/server/utils/AiProviders/genericOpenAi/index.js b/server/utils/AiProviders/genericOpenAi/index.js
index 686d4c67775..dc0264e487a 100644
--- a/server/utils/AiProviders/genericOpenAi/index.js
+++ b/server/utils/AiProviders/genericOpenAi/index.js
@@ -27,11 +27,7 @@ class GenericOpenAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for GenericOpenAiLLM - falling back to NativeEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
     this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
   }
diff --git a/server/utils/AiProviders/groq/index.js b/server/utils/AiProviders/groq/index.js
index 01d92f00693..24a430e62ec 100644
--- a/server/utils/AiProviders/groq/index.js
+++ b/server/utils/AiProviders/groq/index.js
@@ -20,7 +20,7 @@ class GroqLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

diff --git a/server/utils/AiProviders/huggingface/index.js b/server/utils/AiProviders/huggingface/index.js
index 6a79880c86f..d25725c2a0c 100644
--- a/server/utils/AiProviders/huggingface/index.js
+++ b/server/utils/AiProviders/huggingface/index.js
@@ -1,5 +1,4 @@
 const { NativeEmbedder } = require("../../EmbeddingEngines/native");
-const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -26,11 +25,7 @@ class HuggingFaceLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for HuggingFaceLLM - falling back to Native for embedding!"
-      );
-    this.embedder = !embedder ? new OpenAiEmbedder() : new NativeEmbedder();
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.2;
   }

diff --git a/server/utils/AiProviders/koboldCPP/index.js b/server/utils/AiProviders/koboldCPP/index.js
index 4b1ff3f61b0..90186aabf46 100644
--- a/server/utils/AiProviders/koboldCPP/index.js
+++ b/server/utils/AiProviders/koboldCPP/index.js
@@ -26,11 +26,7 @@ class KoboldCPPLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for KoboldCPPLLM - falling back to NativeEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
     this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
   }
diff --git a/server/utils/AiProviders/liteLLM/index.js b/server/utils/AiProviders/liteLLM/index.js
index 5973826cc8e..6152ea29613 100644
--- a/server/utils/AiProviders/liteLLM/index.js
+++ b/server/utils/AiProviders/liteLLM/index.js
@@ -26,11 +26,7 @@ class LiteLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for LiteLLM - falling back to NativeEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
     this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
   }
diff --git a/server/utils/AiProviders/lmStudio/index.js b/server/utils/AiProviders/lmStudio/index.js
index 48f689fbce2..6345fb04e09 100644
--- a/server/utils/AiProviders/lmStudio/index.js
+++ b/server/utils/AiProviders/lmStudio/index.js
@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -27,11 +28,7 @@ class LMStudioLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    if (!embedder)
-      throw new Error(
-        "INVALID LM STUDIO SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LMStudio as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

diff --git a/server/utils/AiProviders/localAi/index.js b/server/utils/AiProviders/localAi/index.js
index 5047752853f..8bc4d047d52 100644
--- a/server/utils/AiProviders/localAi/index.js
+++ b/server/utils/AiProviders/localAi/index.js
@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -19,11 +20,7 @@ class LocalAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    if (!embedder)
-      throw new Error(
-        "INVALID LOCAL AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LocalAI as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

diff --git a/server/utils/AiProviders/mistral/index.js b/server/utils/AiProviders/mistral/index.js
index 8410d4cb642..c2546299f0e 100644
--- a/server/utils/AiProviders/mistral/index.js
+++ b/server/utils/AiProviders/mistral/index.js
@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -20,11 +21,7 @@ class MistralLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for MistralLLM - falling back to OpenAiEmbedder for embedding!"
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.0;
   }

diff --git a/server/utils/AiProviders/native/index.js b/server/utils/AiProviders/native/index.js
index e13b68a2f83..0cbd87476a5 100644
--- a/server/utils/AiProviders/native/index.js
+++ b/server/utils/AiProviders/native/index.js
@@ -23,7 +23,7 @@ class NativeLLM {
       system: this.promptWindowLimit() * 0.15,
       user: this.promptWindowLimit() * 0.7,
     };
-    this.embedder = embedder || new NativeEmbedder();
+    this.embedder = embedder ?? new NativeEmbedder();
     this.cacheDir = path.resolve(
       process.env.STORAGE_DIR
         ? path.resolve(process.env.STORAGE_DIR, "models", "downloaded")
diff --git a/server/utils/AiProviders/ollama/index.js b/server/utils/AiProviders/ollama/index.js
index 73269d6d2ba..609a7601b71 100644
--- a/server/utils/AiProviders/ollama/index.js
+++ b/server/utils/AiProviders/ollama/index.js
@@ -3,6 +3,7 @@ const {
   writeResponseChunk,
   clientAbortedHandler,
 } = require("../../helpers/chat/responses");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");

 // Docs: https://github.com/jmorganca/ollama/blob/main/docs/api.md
 class OllamaAILLM {
@@ -18,11 +19,7 @@ class OllamaAILLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    if (!embedder)
-      throw new Error(
-        "INVALID OLLAMA SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Ollama as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js
index 43d7c287406..8037d23dc39 100644
--- a/server/utils/AiProviders/openAi/index.js
+++ b/server/utils/AiProviders/openAi/index.js
@@ -1,4 +1,4 @@
-const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -18,11 +18,7 @@ class OpenAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for OpenAiLLM - falling back to OpenAiEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new OpenAiEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

diff --git a/server/utils/AiProviders/openRouter/index.js b/server/utils/AiProviders/openRouter/index.js
index a8301083565..bb8cf447192 100644
--- a/server/utils/AiProviders/openRouter/index.js
+++ b/server/utils/AiProviders/openRouter/index.js
@@ -36,7 +36,7 @@ class OpenRouterLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;

     if (!fs.existsSync(cacheFolder))
diff --git a/server/utils/AiProviders/perplexity/index.js b/server/utils/AiProviders/perplexity/index.js
index a17ec43f54c..d3f50de75df 100644
--- a/server/utils/AiProviders/perplexity/index.js
+++ b/server/utils/AiProviders/perplexity/index.js
@@ -28,7 +28,7 @@ class PerplexityLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }

diff --git a/server/utils/AiProviders/textGenWebUI/index.js b/server/utils/AiProviders/textGenWebUI/index.js
index dfce76a5a5e..72f116f8564 100644
--- a/server/utils/AiProviders/textGenWebUI/index.js
+++ b/server/utils/AiProviders/textGenWebUI/index.js
@@ -23,7 +23,7 @@ class TextGenWebUILLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
     this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
   }
diff --git a/server/utils/AiProviders/togetherAi/index.js b/server/utils/AiProviders/togetherAi/index.js
index 577a4b7427c..cdfef339714 100644
--- a/server/utils/AiProviders/togetherAi/index.js
+++ b/server/utils/AiProviders/togetherAi/index.js
@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -23,11 +24,7 @@ class TogetherAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };

-    if (!embedder)
-      throw new Error(
-        "INVALID TOGETHER AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Together AI as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }
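
For reference, a minimal standalone sketch of the constructor pattern every provider above converges on. ExampleLLM is a hypothetical class used only for illustration, and the require path assumes the sketch lives alongside the providers in server/utils/AiProviders/. The nullish coalescing operator (??) falls back to NativeEmbedder only when the caller passes null or undefined, which matches the embedder = null constructor default, whereas the removed !embedder ? ... : ... and || forms would also discard any other falsy value.

const { NativeEmbedder } = require("../../EmbeddingEngines/native");

class ExampleLLM {
  constructor(embedder = null, modelPreference = null) {
    // Keep whatever embedder the caller supplies; otherwise fall back to the
    // built-in local embedder instead of throwing a setup error or warning.
    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
  }
}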