Merged
100 changes: 57 additions & 43 deletions frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
@@ -21,10 +21,50 @@ import { useModal } from "@/hooks/useModal";
import ModalWrapper from "@/components/ModalWrapper";
import CTAButton from "@/components/lib/CTAButton";

const EMBEDDERS = [
{
name: "AnythingLLM Embedder",
value: "native",
logo: AnythingLLMIcon,
options: (settings) => <NativeEmbeddingOptions settings={settings} />,
description:
"Use the built-in embedding engine for AnythingLLM. Zero setup!",
},
{
name: "OpenAI",
value: "openai",
logo: OpenAiLogo,
options: (settings) => <OpenAiOptions settings={settings} />,
description: "The standard option for most non-commercial use.",
},
{
name: "Azure OpenAI",
value: "azure",
logo: AzureOpenAiLogo,
options: (settings) => <AzureAiOptions settings={settings} />,
description: "The enterprise option of OpenAI hosted on Azure services.",
},
{
name: "Local AI",
value: "localai",
logo: LocalAiLogo,
options: (settings) => <LocalAiOptions settings={settings} />,
description: "Run embedding models locally on your own machine.",
},
{
name: "Ollama",
value: "ollama",
logo: OllamaLogo,
options: (settings) => <OllamaEmbeddingOptions settings={settings} />,
description: "Run embedding models locally on your own machine.",
},
];
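
Review note: hoisting EMBEDDERS to module scope means each entry's options is now a render function receiving the live settings object, rather than JSX captured inside the component on every render. A minimal sketch of how a panel is produced from the array (mirroring the lookup further down in this diff):

    // settings comes from component state; selectedEmbedder is the chosen value
    const selected = EMBEDDERS.find((e) => e.value === selectedEmbedder);
    const optionsPanel = selected ? selected.options(settings) : null; // JSX for the active embedder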

export default function GeneralEmbeddingPreference() {
const [saving, setSaving] = useState(false);
const [hasChanges, setHasChanges] = useState(false);
const [hasEmbeddings, setHasEmbeddings] = useState(false);
const [hasCachedEmbeddings, setHasCachedEmbeddings] = useState(false);
const [settings, setSettings] = useState(null);
const [loading, setLoading] = useState(true);
const [searchQuery, setSearchQuery] = useState("");
@@ -34,12 +74,24 @@ export default function GeneralEmbeddingPreference() {
const searchInputRef = useRef(null);
const { isOpen, openModal, closeModal } = useModal();

function embedderModelChanged(formEl) {
try {
const newModel = new FormData(formEl).get("EmbeddingModelPref") ?? null;
if (newModel === null) return false;
return settings?.EmbeddingModelPref !== newModel;
} catch (error) {
console.error(error);
}
return false;
}
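
Review note: this check assumes the active embedder's options component renders a form field named EmbeddingModelPref; when no such field exists, FormData.get returns null and the function reports no change. A hedged sketch of the markup shape being relied on (the field name comes from the code above; the component internals and model value are illustrative):

    <form onSubmit={handleSubmit}>
      {/* rendered by an options component such as OpenAiOptions */}
      <select name="EmbeddingModelPref" defaultValue={settings?.EmbeddingModelPref}>
        <option value="text-embedding-ada-002">text-embedding-ada-002</option>
      </select>
    </form>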

const handleSubmit = async (e) => {
e.preventDefault();
if (
selectedEmbedder !== settings?.EmbeddingEngine &&
(selectedEmbedder !== settings?.EmbeddingEngine ||
embedderModelChanged(e.target)) &&
hasChanges &&
hasEmbeddings
(hasEmbeddings || hasCachedEmbeddings)
) {
openModal();
} else {
@@ -89,50 +141,12 @@ export default function GeneralEmbeddingPreference() {
setSettings(_settings);
setSelectedEmbedder(_settings?.EmbeddingEngine || "native");
setHasEmbeddings(_settings?.HasExistingEmbeddings || false);
setHasCachedEmbeddings(_settings?.HasCachedEmbeddings || false);
setLoading(false);
}
fetchKeys();
}, []);
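
Review note: the fields consumed by this effect are produced by SystemSettings.currentSettings (see the server diff below). A sketch of the assumed subset of the System.keys() payload, limited to the keys read here:

    // Assumed shape (only the fields used above):
    // {
    //   EmbeddingEngine: "native" | "openai" | "azure" | "localai" | "ollama",
    //   HasExistingEmbeddings: boolean, // docs actively embedded in workspaces
    //   HasCachedEmbeddings: boolean,   // new in this PR: cached vector files on disk
    // }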

const EMBEDDERS = [
{
name: "AnythingLLM Embedder",
value: "native",
logo: AnythingLLMIcon,
options: <NativeEmbeddingOptions settings={settings} />,
description:
"Use the built-in embedding engine for AnythingLLM. Zero setup!",
},
{
name: "OpenAI",
value: "openai",
logo: OpenAiLogo,
options: <OpenAiOptions settings={settings} />,
description: "The standard option for most non-commercial use.",
},
{
name: "Azure OpenAI",
value: "azure",
logo: AzureOpenAiLogo,
options: <AzureAiOptions settings={settings} />,
description: "The enterprise option of OpenAI hosted on Azure services.",
},
{
name: "Local AI",
value: "localai",
logo: LocalAiLogo,
options: <LocalAiOptions settings={settings} />,
description: "Run embedding models locally on your own machine.",
},
{
name: "Ollama",
value: "ollama",
logo: OllamaLogo,
options: <OllamaEmbeddingOptions settings={settings} />,
description: "Run embedding models locally on your own machine.",
},
];

useEffect(() => {
const filtered = EMBEDDERS.filter((embedder) =>
embedder.name.toLowerCase().includes(searchQuery.toLowerCase())
@@ -282,15 +296,15 @@ export default function GeneralEmbeddingPreference() {
{selectedEmbedder &&
EMBEDDERS.find(
(embedder) => embedder.value === selectedEmbedder
)?.options}
)?.options(settings)}
</div>
</div>
</form>
</div>
)}
<ModalWrapper isOpen={isOpen}>
<ChangeWarningModal
warningText="Switching the vector database will ignore previously embedded documents and future similarity search results. They will need to be re-added to each workspace."
warningText="Switching the embedding model will break previously embedded documents from working during chat. They will need to un-embed from every workspace and fully removed and re-uploaded so they can be embed by the new embedding model."
onClose={closeModal}
onConfirm={handleSaveSettings}
/>
4 changes: 3 additions & 1 deletion server/models/systemSettings.js
@@ -87,6 +87,7 @@ const SystemSettings = {
},
},
currentSettings: async function () {
const { hasVectorCachedFiles } = require("../utils/files");
const llmProvider = process.env.LLM_PROVIDER;
const vectorDB = process.env.VECTOR_DB;
return {
@@ -104,7 +105,8 @@
// Embedder Provider Selection Settings & Configs
// --------------------------------------------------------
EmbeddingEngine: process.env.EMBEDDING_ENGINE,
HasExistingEmbeddings: await this.hasEmbeddings(),
HasExistingEmbeddings: await this.hasEmbeddings(), // check if they have any currently embedded documents active in workspaces.
HasCachedEmbeddings: hasVectorCachedFiles(), // check if they have any currently cached embedded docs.
EmbeddingBasePath: process.env.EMBEDDING_BASE_PATH,
EmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
EmbeddingModelMaxChunkLength:
14 changes: 14 additions & 0 deletions server/utils/files/index.js
@@ -192,6 +192,19 @@ function normalizePath(filepath = "") {
return result;
}

// Check if the vector-cache folder is empty or not.
// Useful for when the user is changing embedders, as this will
// break the previous cache.
function hasVectorCachedFiles() {
try {
return (
fs.readdirSync(vectorCachePath)?.filter((name) => name.endsWith(".json"))
.length !== 0
);
} catch {}
return false;
}
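
Review note: this helper is consumed by SystemSettings.currentSettings above. A minimal usage sketch (the require path mirrors the one used in systemSettings.js; the warning flow is illustrative):

    const { hasVectorCachedFiles } = require("../utils/files");

    // Cached vectors were produced by the previous embedder and will not match
    // a new one, so surface a warning before allowing an embedder switch.
    if (hasVectorCachedFiles()) {
      console.warn("Cached embeddings present; switching embedders invalidates them.");
    }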

module.exports = {
findDocumentInDocuments,
cachedVectorInformation,
@@ -203,4 +216,5 @@ module.exports = {
normalizePath,
isWithin,
documentsPath,
hasVectorCachedFiles,
};