- There is no set up required when using AnythingLLM's native embedding
- engine.
+ {t("embedding.provider.description")}
);
diff --git a/frontend/src/components/TranscriptionSelection/NativeTranscriptionOptions/index.jsx b/frontend/src/components/TranscriptionSelection/NativeTranscriptionOptions/index.jsx
index 07ee12126ae..4a8948664e6 100644
--- a/frontend/src/components/TranscriptionSelection/NativeTranscriptionOptions/index.jsx
+++ b/frontend/src/components/TranscriptionSelection/NativeTranscriptionOptions/index.jsx
@@ -1,19 +1,21 @@
import { Gauge } from "@phosphor-icons/react";
+import { useTranslation } from "react-i18next";
+
export default function NativeTranscriptionOptions() {
+ const { t } = useTranslation();
return (
- Using the local whisper model on machines with limited RAM or CPU
- can stall AnythingLLM when processing media files.
+ {t("transcription.warn-start")}
- We recommend at least 2GB of RAM and upload files <10Mb.
+ {t("transcription.warn-recommend")}
- The built-in model will automatically download on the first use.
+ {t("transcription.warn-end")}
@@ -21,7 +23,7 @@ export default function NativeTranscriptionOptions() {
);
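For context, the refactored component reduces to the pattern below. This is a condensed, illustrative sketch only — the real file's wrapper markup and Tailwind classes are not reproduced here:

```jsx
// Condensed sketch of NativeTranscriptionOptions after this change (illustrative only).
import { Gauge } from "@phosphor-icons/react";
import { useTranslation } from "react-i18next";

export default function NativeTranscriptionOptions() {
  const { t } = useTranslation();
  return (
    <div className="flex flex-col gap-y-1">
      <Gauge size={25} />
      {/* Hard-coded copy replaced by keys under "transcription" in the locale files */}
      <p>{t("transcription.warn-start")}</p>
      <p>{t("transcription.warn-recommend")}</p>
      <p>{t("transcription.warn-end")}</p>
    </div>
  );
}
```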
diff --git a/frontend/src/locales/en/common.js b/frontend/src/locales/en/common.js
index d71c469755b..9f588b874c9 100644
--- a/frontend/src/locales/en/common.js
+++ b/frontend/src/locales/en/common.js
@@ -1,6 +1,14 @@
const TRANSLATIONS = {
common: {
"workspaces-name": "Workspaces Name",
+ error: "error",
+ success: "success",
+ user: "User",
+ selection: "Model Selection",
+ saving: "Saving...",
+ save: "Save changes",
+ previous: "Previous Page",
+ next: "Next Page"
},
// Setting Sidebar menu items.
@@ -83,47 +91,301 @@ const TRANSLATIONS = {
search: "Search all LLM providers",
},
model: {
- title:"Workspace Chat model",
+ title: "Workspace Chat model",
description: "The specific chat model that will be used for this workspace. If empty, will use the system LLM preference.",
- wait:"-- waiting for models --"
+ wait: "-- waiting for models --"
},
- mode:{
+ mode: {
title: "Chat mode",
- chat:{
+ chat: {
title: "Chat",
"desc-start": "will provide answers with the LLM's general knowledge",
and: "and",
"desc-end": "document context that is found.",
},
- query:{
+ query: {
title: "Query",
"desc-start": "will provide answers",
only: "only",
"desc-end": "if document context is found.",
}
},
- history:{
+ history: {
title: "Chat History",
"desc-start": "The number of previous chats that will be included in the response's short-term memory.",
- recommend : "Recommend 20. ",
+ recommend: "Recommend 20. ",
"desc-end": "AAnything more than 45 is likely to lead to continuous chat failures depending on message size."
},
- prompt:{
+ prompt: {
title: "Prompt",
description: "The prompt that will be used on this workspace. Define the context and instructions for the AI to generate a response. You should to provide a carefully crafted prompt so the AI can generate a relevant and accurate response.",
},
- refusal:{
- title:"Query mode refusal response",
+ refusal: {
+ title: "Query mode refusal response",
"desc-start": "When in",
query: "query",
"desc-end": "mode, you may want to return a custom refusal response when no context is found.",
},
- temperature:{
+ temperature: {
title: "LLM Temperature",
"desc-start": 'This setting controls how "random" or dynamic your chat responses will be.',
"desc-end": "The higher the number (1.0 maximum) the more random and incoherent.",
"recommend": "Recommended:",
+ },
+ },
+
+ // Vector Database
+ "vector-workspace": {
+ identifier: "Vector database identifier",
+ snippets: {
+ title: "Max Context Snippets",
+ description: "This setting controls the maximum amount of context snippets the will be sent to the LLM for per chat or query.",
+ recommend: "Recommended: 4",
+ },
+ doc: {
+ title: "Document similarity threshold",
+ description: "The minimum similarity score required for a source to be considered related to the chat. The higher the number, the more similar the source must be to the chat.",
+ zero: "No restriction",
+ low: "Low (similarity score ≥ .25)",
+ medium: "Medium (similarity score ≥ .50)",
+ high: "High (similarity score ≥ .75)",
+ },
+ reset: {
+ reset: "Reset Vector Database",
+ resetting: "Clearing vectors...",
+ confirm: "You are about to reset this workspace's vector database. This will remove all vector embeddings currently embedded.\n\nThe original source files will remain untouched. This action is irreversible.",
+ error: "Workspace vector database could not be reset!",
+ success: "Workspace vector database was reset!",
+ },
+ },
+
+ // Agent Configuration
+ agent: {
+ "performance-warning": "Performance of LLMs that do not explicitly support tool-calling is highly dependent on the model's capabilities and accuracy. Some abilities may be limited or non-functional.",
+ provider: {
+ title: "Workspace Agent LLM Provider",
+ description: "The specific LLM provider & model that will be used for this workspace's @agent agent.",
+ },
+ mode: {
+ chat: {
+ title: "Workspace Agent Chat model",
+ description: "The specific chat model that will be used for this workspace's @agent agent.",
+ },
+ title: "Workspace Agent model",
+ description: "The specific LLM model that will be used for this workspace's @agent agent.",
+ wait: "-- waiting for models --"
+ },
+
+ skill: {
+ title: "Default agent skills",
+ description: "Improve the natural abilities of the default agent with these pre-built skills. This set up applies to all workspaces.",
+ rag: {
+ title: "RAG & long-term memory",
+ description: 'Allow the agent to leverage your local documents to answer a query or ask the agent to "remember" pieces of content for long-term memory retrieval.',
+ },
+ view: {
+ title: "View & summarize documents",
+ description: "Allow the agent to list and summarize the content of workspace files currently embedded.",
+ },
+ scrape: {
+ title: "Scrape websites",
+ description: "Allow the agent to visit and scrape the content of websites.",
+ },
+ generate: {
+ title: "Generate charts",
+ description: "Enable the default agent to generate various types of charts from data provided or given in chat.",
+ },
+ save: {
+ title: "Generate & save files to browser",
+ description: "Enable the default agent to generate and write to files that save and can be downloaded in your browser.",
+ },
+ web: {
+ title: "Live web search and browsing",
+ "desc-start": "Enable your agent to search the web to answer your questions by connecting to a web-search (SERP) provider.",
+ "desc-end": "Web search during agent sessions will not work until this is set up.",
+ },
+ },
+ },
+
+ // Workspace Chats
+ recorded: {
+ title: "Workspace Chats",
+ description: "These are all the recorded chats and messages that have been sent by users ordered by their creation date.",
+ export: "Export",
+ table: {
+ id: "Id",
+ by: "Sent By",
+ workspace: "Workspace",
+ prompt: "Prompt",
+ response: "Response",
+ at: "Sent At",
+ }
+ },
+
+ // Appearance
+ appearance: {
+ title: "Appearance",
+ description: "Customize the appearance settings of your platform.",
+ logo: {
+ title: "Customize Logo",
+ description: "Upload your custom logo to make your chatbot yours.",
+ add: "Add a custom logo",
+ recommend: "Recommended size: 800 x 200",
+ remove: "Remove",
+ replace: "Replace",
+ },
+ message: {
+ title: "Customize Messages",
+ description: "Customize the automatic messages displayed to your users.",
+ new: "New",
+ system: "system",
+ user: "user",
+ message: "message",
+ assistant: "AnythingLLM Chat Assistant",
+ "double-click": "Double click to edit...",
+ save: "Save Messages",
+ },
+ icons: {
+ title: "Custom Footer Icons",
+ description: "Customize the footer icons displayed on the bottom of the sidebar.",
+ icon: "Icon",
+ link: "Link",
}
+ },
+
+ // API Keys
+ api: {
+ title: "API Keys",
+ description: "API keys allow the holder to programmatically access and manage this AnythingLLM instance.",
+ link: "Read the API documentation",
+ generate: "Generate New API Key",
+ table: {
+ key: "API Key",
+ by: "Created By",
+ created: "Created",
+ }
+ },
+
+ llm: {
+ title:"LLM Preference",
+ description:"These are the credentials and settings for your preferred LLM chat & embedding provider. Its important these keys are current and correct or else AnythingLLM will not function properly.",
+ provider: "LLM Provider",
+ },
+
+ transcription: {
+ title: "Transcription Model Preference",
+ description: "These are the credentials and settings for your preferred transcription model provider. Its important these keys are current and correct or else media files and audio will not transcribe.",
+ provider: "Transcription Provider",
+ "warn-start": "Using the local whisper model on machines with limited RAM or CPU can stall AnythingLLM when processing media files.",
+ "warn-recommend": "We recommend at least 2GB of RAM and upload files <10Mb.",
+ "warn-end": "The built-in model will automatically download on the first use.",
+ },
+
+ embedding: {
+ title: "Embedding Preference",
+ "desc-start": "When using an LLM that does not natively support an embedding engine - you may need to additionally specify credentials to for embedding text.",
+ "desc-end": "Embedding is the process of turning text into vectors. These credentials are required to turn your files and prompts into a format which AnythingLLM can use to process.",
+ provider: {
+ title: "Embedding Provider",
+ description: "There is no set up required when using AnythingLLM's native embedding engine.",
+ },
+ },
+
+ text: {
+ title: "Text splitting & Chunking Preferences",
+ "desc-start": "Sometimes, you may want to change the default way that new documents are split and chunked before being inserted into your vector database.",
+ "desc-end": "You should only modify this setting if you understand how text splitting works and it's side effects.",
+ "warn-start": "Changes here will only apply to",
+ "warn-center": "newly embedded documents",
+ "warn-end": ", not existing documents.",
+ size: {
+ title: "Text Chunk Size",
+ description: "This is the maximum length of characters that can be present in a single vector.",
+ recommend: "Embed model maximum length is",
+ },
+
+ overlap: {
+ title: "Text Chunk Overlap",
+ description: "This is the maximum overlap of characters that occurs during chunking between two adjacent text chunks.",
+ }
+ },
+
+ // Vector Database
+ vector: {
+ title: "Vector Database",
+ description: "These are the credentials and settings for how your AnythingLLM instance will function. It's important these keys are current and correct.",
+ provider: {
+ title: "Vector Database Provider",
+ description: "There is no configuration needed for LanceDB.",
+ }
+ },
+
+ // Embeddable Chat Widgets
+ embeddable: {
+ title: "Embeddable Chat Widgets",
+ description: "Embeddable chat widgets are public facing chat interfaces that are tied to a single workspace. These allow you to build workspaces that then you can publish to the world.",
+ create: "Create embed",
+ table: {
+ workspace: "Workspace",
+ chats: "Sent Chats",
+ Active: "Active Domains",
+ }
+ },
+
+ "embed-chats": {
+ title: "Embed Chats",
+ description: "These are all the recorded chats and messages from any embed that you have published.",
+ table: {
+ embed: "Embed",
+ sender: "Sender",
+ message: "Message",
+ response: "Response",
+ at: "Sent At",
+
+ }
+ },
+
+ multi: {
+ title: "Multi-User Mode",
+ description: "Set up your instance to support your team by activating Multi-User Mode.",
+ enable: {
+ "is-enable": "Multi-User Mode is Enabled",
+ enable: "Enable Multi-User Mode",
+ description: "By default, you will be the only admin. As an admin you will need to create accounts for all new users or admins. Do not lose your password as only an Admin user can reset passwords.",
+ username: "Admin account username",
+ password: "Admin account password",
+ },
+ password: {
+ title: "Password Protection",
+ description: "Protect your AnythingLLM instance with a password. If you forget this there is no recovery method so ensure you save this password."
+ },
+ instance: {
+ title: "Password Protect Instance",
+ description: "By default, you will be the only admin. As an admin you will need to create accounts for all new users or admins. Do not lose your password as only an Admin user can reset passwords.",
+ password: "Instance password",
+ }
+ },
+
+ // Event Logs
+ event: {
+ title: "Event Logs",
+ description: "View all actions and events happening on this instance for monitoring.",
+ clear: "Clear Event Logs",
+ table: {
+ type: "Event Type",
+ user: "User",
+ occurred: "Occurred At",
+ }
+ },
+
+ // Privacy & Data-Handling
+ privacy: {
+ title: "Privacy & Data-Handling",
+ description: "This is your configuration for how connected third party providers and AnythingLLM handle your data.",
+ llm: "LLM Selection",
+ embedding: "Embedding Preference",
+ vector: "Vector Database",
+ anonymous: "Anonymous Telemetry Enabled",
}
};
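These locale modules only take effect once they are registered with i18next. The bootstrap file is not part of this diff, so the following is a hedged sketch of how the resources could be wired (the file location, the `common` namespace name, and the `resources` shape are assumptions, not taken from the patch):

```js
// Hypothetical i18n bootstrap — not part of this patch; paths and options are assumed.
import i18next from "i18next";
import { initReactI18next } from "react-i18next";
import English from "@/locales/en/common";
import Chinese from "@/locales/zh/common";

i18next.use(initReactI18next).init({
  fallbackLng: "en",   // missing zh keys fall back to the English copy
  defaultNS: "common", // lets components call t("transcription.title") etc. without a namespace prefix
  resources: {
    en: { common: English },
    zh: { common: Chinese },
  },
  interpolation: { escapeValue: false }, // React already escapes rendered strings
});

export default i18next;
```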
diff --git a/frontend/src/locales/zh/common.js b/frontend/src/locales/zh/common.js
index 513b3f37d06..f06871e2a23 100644
--- a/frontend/src/locales/zh/common.js
+++ b/frontend/src/locales/zh/common.js
@@ -2,6 +2,14 @@
const TRANSLATIONS = {
common: {
"workspaces-name": "工作区名称",
+ error: "错误",
+ success: "成功",
+ user: "用户",
+ selection: "模型选择",
+ save: "保存更改",
+ saving: "保存中...",
+ previous: "上一页",
+ next: "下一页"
},
// Setting Sidebar menu items.
@@ -68,7 +76,7 @@ const TRANSLATIONS = {
image: "工作区图像",
remove: "移除工作区图像",
},
- delete:{
+ delete: {
delete: "删除工作区",
deleting: "正在删除工作区...",
"confirm-start": "您即将删除整个",
@@ -86,47 +94,298 @@ const TRANSLATIONS = {
model: {
title: "工作区聊天模型",
description: "将用于此工作区的特定聊天模型。如果为空,将使用系统LLM首选项。",
- wait:"-- 等待模型 --",
+ wait: "-- 等待模型 --",
},
- mode:{
+ mode: {
title: "聊天模式",
- chat:{
+ chat: {
title: "聊天",
"desc-start": "将提供法学硕士的一般知识",
and: "和",
"desc-end": "找到的文档上下文的答案。",
},
- query:{
+ query: {
title: "查询",
"desc-start": "将",
only: "仅",
"desc-end": "提供找到的文档上下文的答案。",
}
},
- history:{
+ history: {
title: "聊天历史记录",
"desc-start": "将包含在响应的短期记忆中的先前聊天的数量。",
recommend: "推荐 20。",
"desc-end": "任何超过 45 的值都可能导致连续聊天失败,具体取决于消息大小。",
},
- prompt:{
+ prompt: {
title: "聊天提示",
description: "将在此工作区上使用的提示。定义 AI 生成响应的上下文和指令。您应该提供精心设计的提示,以便人工智能可以生成相关且准确的响应。",
},
- refusal:{
- title:"查询模式拒绝响应",
+ refusal: {
+ title: "查询模式拒绝响应",
"desc-start": "当处于",
query: "查询",
"desc-end": "模式时,当未找到上下文时,您可能希望返回自定义拒绝响应。",
},
- temperature:{
+ temperature: {
title: "LLM Temperature",
"desc-start": '此设置控制您的聊天响应的"随机"或动态程度。',
"desc-end": "数字越高(最大为 1.0),随机性和不连贯性就越强。",
recommend: "推荐:",
},
},
+
+ // Vector Database Settings
+ "vector-workspace": {
+ identifier: "向量数据库标识符",
+ snippets: {
+ title: "最大上下文片段",
+ description: "此设置控制每次聊天或查询将发送到 LLM 的上下文片段的最大数量。",
+ recommend: "推荐: 4",
+ },
+ doc: {
+ title: "文档相似性阈值",
+ description: "源被视为与聊天相关所需的最低相似度分数。数字越高,来源与聊天就越相似。",
+ zero: "无限制",
+ low: "低(相似度分数 ≥ .25)",
+ medium: "中(相似度分数 ≥ .50)",
+ high: "高(相似度分数 ≥ .75)",
+ },
+ reset: {
+ reset: "重置向量数据库",
+ resetting: "清除向量...",
+ confirm: "您将重置此工作区的矢量数据库。这将删除当前嵌入的所有矢量嵌入。\n\n原始源文件将保持不变。此操作是不可逆转的。",
+ success: "向量数据库已重置。",
+ error: "无法重置工作区向量数据库!",
+ },
+ },
+
+ // Agent Configuration
+ agent: {
+ "performance-warning": "不明确支持工具调用的 LLMs 的性能高度依赖于模型的功能和准确性。有些能力可能受到限制或不起作用。",
+ provider: {
+ title: "工作区代理 LLM 提供商",
+ description: "将用于此工作区的 @agent 代理的特定 LLM 提供商和模型。",
+ },
+ mode: {
+ chat: {
+ title: "工作区代理聊天模型",
+ description: "将用于此工作区的 @agent 代理的特定聊天模型。",
+ },
+ title: "工作区代理模型",
+ description: "将用于此工作区的 @agent 代理的特定 LLM 模型。",
+ wait: "-- 等待模型 --"
+ },
+ skill: {
+ title: "默认代理技能",
+ description: "使用这些预构建的技能提高默认代理的自然能力。此设置适用于所有工作区。",
+ rag: {
+ title: "RAG和长期记忆",
+ description: '允许代理利用您的本地文档来回答查询,或要求代理"记住"长期记忆检索的内容片段。',
+ },
+ view: {
+ title: "查看和总结文档",
+ description: "允许代理列出和总结当前嵌入的工作区文件的内容。",
+ },
+ scrape: {
+ title: "抓取网站",
+ description: "允许代理访问和抓取网站的内容。",
+ },
+ generate: {
+ title: "生成图表",
+ description: "使默认代理能够从提供的数据或聊天中生成各种类型的图表。",
+ },
+ save: {
+ title: "生成并保存文件到浏览器",
+ description: "使默认代理能够生成并写入文件,这些文件可以保存并在您的浏览器中下载。",
+ },
+ web: {
+ title: "实时网络搜索和浏览",
+ "desc-start": "通过连接到网络搜索(SERP)提供者,使您的代理能够搜索网络以回答您的问题。",
+ "desc-end": "在代理会话期间,网络搜索将不起作用,直到此设置完成。",
+ },
+ },
+ },
+
+ // Workspace Chat
+ recorded: {
+ title: "工作区聊天历史记录",
+ description: "这些是用户发送的所有聊天记录和消息,按创建日期排序。",
+ export: "导出",
+ table: {
+ id: "Id",
+ by: "Sent By",
+ workspace: "Workspace",
+ prompt: "Prompt",
+ response: "Response",
+ at: "Sent At",
+ }
+ },
+
+ appearance: {
+ title: "外观",
+ description: "自定义平台的外观设置。",
+ logo: {
+ title: "自定义图标",
+ description: "上传您的自定义图标,让您的聊天机器人成为您的。",
+ add: "添加自定义图标",
+ recommended: "建议尺寸:800 x 200",
+ remove: "移除",
+ replace: "替换",
+ },
+ message: {
+ title: "自定义消息",
+ description: "自定义向用户显示的自动消息。",
+ new: "新建",
+ system: "系统",
+ user: "用户",
+ message: "消息",
+ assistant: "AnythingLLM 聊天助手",
+ "double-click": "双击以编辑...",
+ save: "保存消息",
+ },
+ icons: {
+ title: "自定义页脚图标",
+ description: "自定义侧边栏底部显示的页脚图标。",
+ icon: "图标",
+ link: "链接",
+ }
+ },
+
+ // API Keys
+ api: {
+ title: "API 密钥",
+ description: "API 密钥允许持有者以编程方式访问和管理此 AnythingLLM 实例。",
+ link: "阅读 API 文档",
+ generate: "生成新的 API 密钥",
+ table: {
+ key: "API 密钥",
+ by: "创建者",
+ created: "创建",
+ }
+ },
+
+ // LLM Preferences
+ llm: {
+ title: "LLM 偏好",
+ description: "这些是您首选的 LLM 聊天和嵌入提供商的凭据和设置。重要的是,这些密钥是最新的和正确的,否则 AnythingLLM 将无法正常运行。",
+ provider: "LLM 提供商",
+ },
+
+ transcription: {
+ title: "转录模型偏好",
+ description: "这些是您的首选转录模型提供商的凭据和设置。重要的是这些密钥是最新且正确的,否则媒体文件和音频将无法转录。",
+ provider: "转录提供商",
+ "warn-start": "在 RAM 或 CPU 有限的计算机上使用本地耳语模型可能会在处理媒体文件时停止 AnythingLLM。",
+ "warn-recommend": "我们建议至少 2GB RAM 并上传 <10Mb 的文件。",
+ "warn-end": "内置模型将在首次使用时自动下载。",
+ },
+
+ embedding: {
+ title: "嵌入首选项",
+ "desc-start": "当使用本身不支持嵌入引擎的 LLM 时,您可能需要额外指定用于嵌入文本的凭据。",
+ "desc-end": "嵌入是将文本转换为矢量的过程。需要这些凭据才能将您的文件和提示转换为 AnythingLLM 可以用来处理的格式。",
+ provider: {
+ title: "嵌入引擎提供商",
+ description: "使用 AnythingLLM 的本机嵌入引擎时不需要设置。",
+ }
+ },
+
+ text: {
+ title: "文本拆分和分块首选项",
+ "desc-start": "有时,您可能希望更改新文档在插入到矢量数据库之前拆分和分块的默认方式。",
+ "desc-end": "只有在了解文本拆分的工作原理及其副作用时,才应修改此设置。",
+ "warn-start": "此处的更改仅适用于",
+ "warn-center": "新嵌入的文档",
+ "warn-end": ",而不是现有文档。",
+ size: {
+ title: "文本块大小",
+ description: "这是单个向量中可以存在的字符的最大长度。",
+ recommend: "嵌入模型的最大长度为",
+ },
+ overlap: {
+ title: "文本块重叠",
+ description: "这是在两个相邻文本块之间分块期间发生的最大字符重叠。",
+ }
+ },
+
+ // Vector Database
+ vector: {
+ title: "向量数据库",
+ description: "这些是 AnythingLLM 实例如何运行的凭据和设置。重要的是,这些密钥是最新的和正确的。",
+ provider: {
+ title: "向量数据库提供商",
+ description: "LanceDB 不需要任何配置。",
+ },
+ },
+
+ // Embeddable Chats
+ embeddable: {
+ title: "可嵌入的聊天小部件",
+ description: "可嵌入的聊天小部件是与单个工作区绑定的面向公众的聊天界面。这些允许您构建工作区,然后您可以将其发布到全世界。",
+ create: "创建嵌入式对话",
+ table: {
+ workspace: "工作区",
+ chats: "已发送聊天",
+ Active: "活动域",
+ }
+ },
+
+ // Embeddable Chat History
+ "embed-chats": {
+ title: "嵌入聊天",
+ description: "这些是您发布的任何嵌入的所有记录的聊天和消息。",
+ table: {
+ embed: "嵌入",
+ sender: "发送者",
+ message: "消息",
+ response: "响应",
+ at: "发送于",
+ },
+ },
+
+ multi: {
+ title: "多用户模式",
+ description: "通过激活多用户模式来设置您的实例以支持您的团队。",
+ enable: {
+ "is-enable": "多用户模式已启用",
+ enable: "启用多用户模式",
+ description: "默认情况下,您将是唯一的管理员。作为管理员,您需要为所有新用户或管理员创建账户。不要丢失您的密码,因为只有管理员用户可以重置密码。",
+ username: "管理员账户用户名",
+ password: "管理员账户密码",
+ },
+ password: {
+ title: "密码保护",
+ description: "用密码保护您的AnythingLLM实例。如果您忘记了密码,那么没有恢复方法,所以请确保保存这个密码。",
+ },
+ instance: {
+ title: "实例密码保护",
+ description: "默认情况下,您将是唯一的管理员。作为管理员,您需要为所有新用户或管理员创建账户。不要丢失您的密码,因为只有管理员用户可以重置密码。",
+ password: "实例密码",
+ }
+ },
+
+ // Event Logs
+ event: {
+ title: "事件日志",
+ description: "查看此实例上发生的所有操作和事件以进行监控。",
+ clear: "清除事件日志",
+ table: {
+ type: "事件类型",
+ user: "用户",
+ occurred: "发生时间",
+ }
+ },
+
+ // Privacy & Data-Handling
+ privacy: {
+ title: "隐私和数据处理",
+ description: "这是您对如何处理连接的第三方提供商和AnythingLLM的数据的配置。",
+ llm: "LLM选择",
+ embedding: "嵌入偏好",
+ vector: "向量数据库",
+ anonymous: "启用匿名遥测",
+ }
};
-export default TRANSLATIONS;
-
\ No newline at end of file
+export default TRANSLATIONS;
\ No newline at end of file
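Every `t()` lookup assumes the zh tree mirrors the en tree key-for-key, so a small development-time parity check is cheap insurance against drifting keys. A hedged sketch, not part of the patch:

```js
// Hypothetical dev-time check: flatten both locale trees and report keys missing from zh.
import English from "@/locales/en/common";
import Chinese from "@/locales/zh/common";

function flattenKeys(obj, prefix = "") {
  return Object.entries(obj).flatMap(([key, value]) =>
    value && typeof value === "object"
      ? flattenKeys(value, `${prefix}${key}.`)
      : [`${prefix}${key}`]
  );
}

const zhKeys = new Set(flattenKeys(Chinese));
const missingInZh = flattenKeys(English).filter((key) => !zhKeys.has(key));
if (missingInZh.length > 0) {
  console.warn("Locale keys missing from zh/common.js:", missingInZh);
}
```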
diff --git a/frontend/src/pages/Admin/Logging/index.jsx b/frontend/src/pages/Admin/Logging/index.jsx
index 49824784963..389263f4b01 100644
--- a/frontend/src/pages/Admin/Logging/index.jsx
+++ b/frontend/src/pages/Admin/Logging/index.jsx
@@ -7,6 +7,7 @@ import * as Skeleton from "react-loading-skeleton";
import LogRow from "./LogRow";
import showToast from "@/utils/toast";
import CTAButton from "@/components/lib/CTAButton";
+import { useTranslation } from "react-i18next";
export default function AdminLogs() {
const query = useQuery();
@@ -14,6 +15,7 @@ export default function AdminLogs() {
const [logs, setLogs] = useState([]);
const [offset, setOffset] = useState(Number(query.get("offset") || 0));
const [canNext, setCanNext] = useState(false);
+ const { t } = useTranslation();
useEffect(() => {
async function fetchLogs() {
@@ -62,12 +64,11 @@ export default function AdminLogs() {
- Event Logs
+ {t("event.title")}
- View all actions and events happening on this instance for
- monitoring.
+ {t("event.description")}
- Embeddable chat widgets are public facing chat interfaces that are
- tied to a single workspace. These allow you to build workspaces
- that then you can publish to the world.
+ {t("embeddable.description")}
- Create embed
+ {t("embeddable.create")}
@@ -52,6 +51,7 @@ export default function EmbedConfigs() {
function EmbedContainer() {
const [loading, setLoading] = useState(true);
const [embeds, setEmbeds] = useState([]);
+ const { t } = useTranslation();
useEffect(() => {
async function fetchUsers() {
@@ -81,13 +81,13 @@ function EmbedContainer() {
- Workspace
+ {t("embeddable.table.workspace")}
- Sent Chats
+ {t("embeddable.table.chats")}
- Active Domains
+ {t("embeddable.table.Active")}
{" "}
diff --git a/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx b/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
index 8f234b5ac1d..15e84467214 100644
--- a/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
@@ -25,6 +25,7 @@ import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
import { useModal } from "@/hooks/useModal";
import ModalWrapper from "@/components/ModalWrapper";
import CTAButton from "@/components/lib/CTAButton";
+import { useTranslation } from "react-i18next";
const EMBEDDERS = [
{
@@ -93,6 +94,7 @@ export default function GeneralEmbeddingPreference() {
const [searchMenuOpen, setSearchMenuOpen] = useState(false);
const searchInputRef = useRef(null);
const { isOpen, openModal, closeModal } = useModal();
+ const { t } = useTranslation();
function embedderModelChanged(formEl) {
try {
@@ -204,17 +206,13 @@ export default function GeneralEmbeddingPreference() {
- Embedding Preference
+ {t("embedding.title")}
- When using an LLM that does not natively support an embedding
- engine - you may need to additionally specify credentials to
- for embedding text.
+ {t("embedding.desc-start")}
- Embedding is the process of turning text into vectors. These
- credentials are required to turn your files and prompts into a
- format which AnythingLLM can use to process.
+ {t("embedding.desc-end")}
{searchMenuOpen && (
diff --git a/frontend/src/pages/GeneralSettings/EmbeddingTextSplitterPreference/index.jsx b/frontend/src/pages/GeneralSettings/EmbeddingTextSplitterPreference/index.jsx
index 5ee1197f117..fd7d7a5b78f 100644
--- a/frontend/src/pages/GeneralSettings/EmbeddingTextSplitterPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/EmbeddingTextSplitterPreference/index.jsx
@@ -6,6 +6,7 @@ import CTAButton from "@/components/lib/CTAButton";
import Admin from "@/models/admin";
import showToast from "@/utils/toast";
import { nFormatter, numberWithCommas } from "@/utils/numbers";
+import { useTranslation } from "react-i18next";
function isNullOrNaN(value) {
if (value === null) return true;
@@ -17,6 +18,7 @@ export default function EmbeddingTextSplitterPreference() {
const [loading, setLoading] = useState(true);
const [saving, setSaving] = useState(false);
const [hasChanges, setHasChanges] = useState(false);
+ const { t } = useTranslation();
const handleSubmit = async (e) => {
e.preventDefault();
@@ -86,25 +88,22 @@ export default function EmbeddingTextSplitterPreference() {
- Text splitting & Chunking Preferences
+ {t("text.title")}
- Sometimes, you may want to change the default way that new
- documents are split and chunked before being inserted into
- your vector database.
- You should only modify this setting if you understand how text
- splitting works and it's side effects.
+ {t("text.desc-start")}
+ {t("text.desc-end")}
- Changes here will only apply to{" "}
- newly embedded documents, not existing documents.
+ {t("text.warn-start")}{" "}
+ {t("text.warn-center")}{t("text.warn-end")}
- These are the credentials and settings for your preferred LLM
- chat & embedding provider. Its important these keys are
- current and correct or else AnythingLLM will not function
- properly.
+ {t("llm.description")}
@@ -321,7 +320,7 @@ export default function GeneralLLMPreference() {
)}
- By default, you will be the only admin. As an admin you will
- need to create accounts for all new users or admins. Do not lose
- your password as only an Admin user can reset passwords.
+ {t("multi.enable.description")}
- Protect your AnythingLLM instance with a password. If you forget
- this there is no recovery method so ensure you save this password.
+ {t("multi.password.description")}
)}
@@ -284,7 +283,7 @@ function PasswordProtection() {
- By default, you will be the only admin. As an admin you will
- need to create accounts for all new users or admins. Do not lose
- your password as only an Admin user can reset passwords.
+ {t("multi.instance.description")}
diff --git a/frontend/src/pages/GeneralSettings/TranscriptionPreference/index.jsx b/frontend/src/pages/GeneralSettings/TranscriptionPreference/index.jsx
index 5fbd196c328..59b06233947 100644
--- a/frontend/src/pages/GeneralSettings/TranscriptionPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/TranscriptionPreference/index.jsx
@@ -11,6 +11,7 @@ import NativeTranscriptionOptions from "@/components/TranscriptionSelection/Nati
import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
import CTAButton from "@/components/lib/CTAButton";
+import { useTranslation } from "react-i18next";
export default function TranscriptionModelPreference() {
const [saving, setSaving] = useState(false);
@@ -22,6 +23,7 @@ export default function TranscriptionModelPreference() {
const [selectedProvider, setSelectedProvider] = useState(null);
const [searchMenuOpen, setSearchMenuOpen] = useState(false);
const searchInputRef = useRef(null);
+ const { t } = useTranslation();
const handleSubmit = async (e) => {
e.preventDefault();
@@ -119,14 +121,11 @@ export default function TranscriptionModelPreference() {
- Transcription Model Preference
+ {t("transcription.title")}
- These are the credentials and settings for your preferred
- transcription model provider. Its important these keys are
- current and correct or else media files and audio will not
- transcribe.
+ {t("transcription.description")}
@@ -140,7 +139,7 @@ export default function TranscriptionModelPreference() {
)}
- These are the credentials and settings for how your
- AnythingLLM instance will function. It's important these keys
- are current and correct.
+ {t("vector.description")}
{searchMenuOpen && (
diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
index 51c11581733..adf6dc6948a 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
@@ -4,6 +4,7 @@ import AgentLLMItem from "./AgentLLMItem";
import { AVAILABLE_LLM_PROVIDERS } from "@/pages/GeneralSettings/LLMPreference";
import { CaretUpDown, Gauge, MagnifyingGlass, X } from "@phosphor-icons/react";
import AgentModelSelection from "../AgentModelSelection";
+import { useTranslation } from "react-i18next";
const ENABLED_PROVIDERS = [
"openai",
@@ -65,7 +66,7 @@ export default function AgentLLMSelection({
const [searchQuery, setSearchQuery] = useState("");
const [searchMenuOpen, setSearchMenuOpen] = useState(false);
const searchInputRef = useRef(null);
-
+ const { t } = useTranslation();
function updateLLMChoice(selection) {
setSearchQuery("");
setSelectedLLM(selection);
@@ -97,9 +98,7 @@ export default function AgentLLMSelection({
- Performance of LLMs that do not explicitly support tool-calling is
- highly dependent on the model's capabilities and accuracy. Some
- abilities may be limited or non-functional.
+ {t("agent.performance-warning")}
@@ -107,11 +106,10 @@ export default function AgentLLMSelection({
- The specific LLM provider & model that will be used for this
- workspace's @agent agent.
+ {t("agent.provider.description")}
diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
index bf51cb87eea..270f22ef94f 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
@@ -1,6 +1,7 @@
import useGetProviderModels, {
DISABLED_PROVIDERS,
} from "@/hooks/useGetProvidersModels";
+import { useTranslation } from "react-i18next";
// These models do NOT support function calling
function supportedModel(provider, model = "") {
@@ -19,6 +20,8 @@ export default function AgentModelSelection({
}) {
const { defaultModels, customModels, loading } =
useGetProviderModels(provider);
+
+ const { t } = useTranslation();
if (DISABLED_PROVIDERS.includes(provider)) return null;
if (loading) {
@@ -26,11 +29,10 @@ export default function AgentModelSelection({
- The specific chat model that will be used for this workspace's
- @agent agent.
+ {t("agent.mode.chat.description")}
@@ -51,11 +53,10 @@ export default function AgentModelSelection({
- The specific LLM model that will be used for this workspace's @agent
- agent.
+ {t("agent.mode.description")}
- Enable your agent to search the web to answer your questions by
- connecting to a web-search (SERP) provider.
+ {t("agent.skill.web.desc-start")}
- Web search during agent sessions will not work until this is set up.
+ {t("agent.skill.web.desc-end")}
diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/index.jsx
index c96cc12469b..56b82e00d93 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/index.jsx
@@ -9,6 +9,7 @@ import GenericSkill from "./GenericSkill";
import Admin from "@/models/admin";
import * as Skeleton from "react-loading-skeleton";
import "react-loading-skeleton/dist/skeleton.css";
+import { useTranslation } from "react-i18next";
export default function WorkspaceAgentConfiguration({ workspace }) {
const [settings, setSettings] = useState({});
@@ -143,17 +144,17 @@ function LoadingSkeleton() {
}
function AvailableAgentSkills({ skills, settings, toggleAgentSkill }) {
+ const { t } = useTranslation();
return (
- Improve the natural abilities of the default agent with these
- pre-built skills. This set up applies to all workspaces.
+ {t("agent.skill.description")}
- The minimum similarity score required for a source to be considered
- related to the chat. The higher the number, the more similar the
- source must be to the chat.
+ {t("vector-workspace.doc.description")}
);
diff --git a/frontend/src/pages/WorkspaceSettings/VectorDatabase/MaxContextSnippets/index.jsx b/frontend/src/pages/WorkspaceSettings/VectorDatabase/MaxContextSnippets/index.jsx
index c66ccfd7199..80aecdc9c38 100644
--- a/frontend/src/pages/WorkspaceSettings/VectorDatabase/MaxContextSnippets/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/VectorDatabase/MaxContextSnippets/index.jsx
@@ -1,15 +1,17 @@
+import { useTranslation } from "react-i18next";
+
export default function MaxContextSnippets({ workspace, setHasChanges }) {
+ const { t } = useTranslation();
return (
- This setting controls the maximum amount of context snippets the will
- be sent to the LLM for per chat or query.
+ {t("vector-workspace.snippets.description")}
- Recommended: 4
+ {t("vector-workspace.snippets.recommend")}
{
if (
!window.confirm(
- `You are about to reset this workspace's vector database. This will remove all vector embeddings currently embedded.\n\nThe original source files will remain untouched. This action is irreversible.`
+ `${t("vector-workspace.reset.confirm")}`
)
)
return false;
@@ -16,14 +17,14 @@ export default function ResetDatabase({ workspace }) {
setDeleting(true);
const success = await Workspace.wipeVectorDb(workspace.slug);
if (!success) {
- showToast("Workspace vector database could not be reset!", "error", {
+ showToast(t("vector-workspace.reset.error"), t("vector-workspace.common.error"), {
clear: true,
});
setDeleting(false);
return;
}
- showToast("Workspace vector database was reset!", "success", {
+ showToast(t("vector-workspace.reset.success"), t("vector-workspace.common.success"), {
clear: true,
});
setDeleting(false);
@@ -36,7 +37,7 @@ export default function ResetDatabase({ workspace }) {
type="button"
className="border-none w-fit transition-all duration-300 border border-transparent rounded-lg whitespace-nowrap text-sm px-5 py-2.5 focus:z-10 bg-red-500/25 text-red-200 hover:text-white hover:bg-red-600 disabled:bg-red-600 disabled:text-red-200 disabled:animate-pulse"
>
- {deleting ? "Clearing vectors..." : "Reset Workspace Vector Database"}
+ {deleting ? t("vector-workspace.reset.resetting") : t("vector-workspace.reset.reset")}
);
}
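Pulling the ResetDatabase hunks together, the component after the change reads roughly as below. This is a condensed sketch: the useTranslation() registration presumably lands in an earlier hunk of this file that is not shown above, the import paths follow the `@/...` patterns used elsewhere in this patch, and the button styling is omitted:

```jsx
// Condensed sketch of ResetDatabase after the change (illustrative only).
import { useState } from "react";
import { useTranslation } from "react-i18next";
import Workspace from "@/models/workspace"; // assumed path, mirroring other "@/..." imports in this patch
import showToast from "@/utils/toast";

export default function ResetDatabase({ workspace }) {
  const { t } = useTranslation();
  const [deleting, setDeleting] = useState(false);

  const resetVectorDatabase = async () => {
    // Translated confirmation text; the \n\n in the locale string keeps the two-paragraph layout.
    if (!window.confirm(t("vector-workspace.reset.confirm"))) return false;
    setDeleting(true);
    const success = await Workspace.wipeVectorDb(workspace.slug);
    if (!success) {
      showToast(t("vector-workspace.reset.error"), t("common.error"), { clear: true });
      setDeleting(false);
      return;
    }
    showToast(t("vector-workspace.reset.success"), t("common.success"), { clear: true });
    setDeleting(false);
  };

  return (
    <button type="button" disabled={deleting} onClick={resetVectorDatabase}>
      {deleting ? t("vector-workspace.reset.resetting") : t("vector-workspace.reset.reset")}
    </button>
  );
}
```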
diff --git a/frontend/src/pages/WorkspaceSettings/VectorDatabase/VectorDBIdentifier/index.jsx b/frontend/src/pages/WorkspaceSettings/VectorDatabase/VectorDBIdentifier/index.jsx
index 9140d7fc0f7..1101299426c 100644
--- a/frontend/src/pages/WorkspaceSettings/VectorDatabase/VectorDBIdentifier/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/VectorDatabase/VectorDBIdentifier/index.jsx
@@ -1,7 +1,10 @@
+import { useTranslation } from "react-i18next";
+
export default function VectorDBIdentifier({ workspace }) {
+ const { t } = useTranslation();
return (