diff --git a/frontend/src/locales/en/common.js b/frontend/src/locales/en/common.js index d137e2a3d97..d71c469755b 100644 --- a/frontend/src/locales/en/common.js +++ b/frontend/src/locales/en/common.js @@ -1,4 +1,8 @@ const TRANSLATIONS = { + common: { + "workspaces-name": "Workspace Name", + }, + // Setting Sidebar menu items. settings: { title: "Instance Settings", @@ -20,6 +24,7 @@ const TRANSLATIONS = { "event-logs": "Event Logs", privacy: "Privacy & Data", }, + // Page Definitions login: { "multi-user": { @@ -29,6 +34,97 @@ const TRANSLATIONS = { }, "sign-in": "Sign in to your AnythingLLM account.", }, + + // Workspace Settings menu items + "workspaces—settings": { + general: "General Settings", + chat: "Chat Settings", + vector: "Vector Database", + members: "Members", + agent: "Agent Configuration", + }, + + // General Appearance + general: { + vector: { + title: "Vector Count", + description: "Total number of vectors in your vector database.", + }, + names: { + description: "This will only change the display name of your workspace." + }, + message: { + title: "Suggested Chat Messages", + description: "Customize the messages that will be suggested to your workspace users.", + add: "Add new message", + save: "Save Messages", + heading: "Explain to me", + body: "the benefits of AnythingLLM", + }, + pfp: { + title: "Assistant Profile Image", + description: "Customize the profile image of the assistant for this workspace.", + image: "Workspace Image", + remove: "Remove Workspace Image", + }, + delete: { + delete: "Delete Workspace", + deleting: "Deleting Workspace...", + "confirm-start": "You are about to delete your entire", + "confirm-end": "workspace. This will remove all vector embeddings in your vector database.\n\nThe original source files will remain untouched. This action is irreversible." 
+ } + }, + + // Chat Settings + chat: { + llm: { + title: "Workspace LLM Provider", + description: "The specific LLM provider & model that will be used for this workspace. By default, it uses the system LLM provider and settings.", + search: "Search all LLM providers", + }, + model: { + title:"Workspace Chat model", + description: "The specific chat model that will be used for this workspace. If empty, will use the system LLM preference.", + wait:"-- waiting for models --" + }, + mode:{ + title: "Chat mode", + chat:{ + title: "Chat", + "desc-start": "will provide answers with the LLM's general knowledge", + and: "and", + "desc-end": "document context that is found.", + }, + query:{ + title: "Query", + "desc-start": "will provide answers", + only: "only", + "desc-end": "if document context is found.", + } + }, + history:{ + title: "Chat History", + "desc-start": "The number of previous chats that will be included in the response's short-term memory.", + recommend : "Recommend 20. ", + "desc-end": "Anything more than 45 is likely to lead to continuous chat failures depending on message size." + }, + prompt:{ + title: "Prompt", + description: "The prompt that will be used on this workspace. Define the context and instructions for the AI to generate a response. 
You should provide a carefully crafted prompt so the AI can generate a relevant and accurate response.", + }, + refusal:{ + title:"Query mode refusal response", + "desc-start": "When in", + query: "query", + "desc-end": "mode, you may want to return a custom refusal response when no context is found.", + }, + temperature:{ + title: "LLM Temperature", + "desc-start": 'This setting controls how "random" or dynamic your chat responses will be.', + "desc-end": "The higher the number (1.0 maximum) the more random and incoherent.", + "recommend": "Recommended:", + } + } }; export default TRANSLATIONS; diff --git a/frontend/src/locales/zh/common.js b/frontend/src/locales/zh/common.js index 82452eb5e16..513b3f37d06 100644 --- a/frontend/src/locales/zh/common.js +++ b/frontend/src/locales/zh/common.js @@ -1,5 +1,9 @@ // Anything with "null" requires a translation. Contribute to translation via a PR! const TRANSLATIONS = { + common: { + "workspaces-name": "工作区名称", + }, + // Setting Sidebar menu items. 
settings: { title: "设置", @@ -11,7 +15,7 @@ const TRANSLATIONS = { appearance: "外观", "api-keys": "API 密钥", llm: "LLM 首选项", - transcription: "Transcription 模型", + transcription: "Transcription 模型", embedder: "Embedder 首选项", "text-splitting": "文本分割", "vector-database": "向量数据库", @@ -21,6 +25,7 @@ const TRANSLATIONS = { "event-logs": "事件日志", privacy: "隐私与数据", }, + // Page Definitions login: { "multi-user": { @@ -30,6 +35,98 @@ const TRANSLATIONS = { }, "sign-in": "登录", }, + + // Workspace Settings menu items + "workspaces—settings": { + general: "通用设置", + chat: "聊天设置", + vector: "向量数据库", + members: "成员", + agent: "代理配置", + }, + + // General Appearance + general: { + vector: { + title: "向量数量", + description: "向量数据库中的总向量数。", + }, + names: { + description: "这只会更改工作区的显示名称。" + }, + message: { + title: "建议的聊天消息", + description: "自定义将向您的工作区用户建议的消息。", + add: "添加新消息", + save: "保存消息", + heading: "向我解释", + body: "AnythingLLM的好处", + }, + pfp: { + title: "助理头像", + description: "为此工作区自定义助手的个人资料图像。", + image: "工作区图像", + remove: "移除工作区图像", + }, + delete:{ + delete: "删除工作区", + deleting: "正在删除工作区...", + "confirm-start": "您即将删除整个", + "confirm-end": "工作区。这将删除矢量数据库中的所有矢量嵌入。\n\n原始源文件将保持不变。此操作是不可逆转的。" + } + }, + + // Chat Settings + chat: { + llm: { + title: "工作区LLM提供者", + description: "将用于此工作区的特定 LLM 提供商和模型。默认情况下,它使用系统 LLM 提供程序和设置。", + search: "搜索所有 LLM 提供商", + }, + model: { + title: "工作区聊天模型", + description: "将用于此工作区的特定聊天模型。如果为空,将使用系统LLM首选项。", + wait:"-- 等待模型 --", + }, + mode:{ + title: "聊天模式", + chat:{ + title: "聊天", + "desc-start": "将提供法学硕士的一般知识", + and: "和", + "desc-end": "找到的文档上下文的答案。", + }, + query:{ + title: "查询", + "desc-start": "将", + only: "仅", + "desc-end": "提供找到的文档上下文的答案。", + } + }, + history:{ + title: "聊天历史记录", + "desc-start": "将包含在响应的短期记忆中的先前聊天的数量。", + recommend: "推荐 20。", + "desc-end": "任何超过 45 的值都可能导致连续聊天失败,具体取决于消息大小。", + }, + prompt:{ + title: "聊天提示", + description: "将在此工作区上使用的提示。定义 AI 生成响应的上下文和指令。您应该提供精心设计的提示,以便人工智能可以生成相关且准确的响应。", + }, + refusal:{ + title:"查询模式拒绝响应", + 
"desc-start": "当处于", + query: "查询", + "desc-end": "模式时,当未找到上下文时,您可能希望返回自定义拒绝响应。", + }, + temperature:{ + title: "LLM Temperature", + "desc-start": '此设置控制您的聊天响应的"随机"或动态程度。', + "desc-end": "数字越高(最大为 1.0),随机性和不连贯性就越强。", + recommend: "推荐:", + }, + }, }; export default TRANSLATIONS; + \ No newline at end of file diff --git a/frontend/src/pages/Admin/Workspaces/NewWorkspaceModal/index.jsx b/frontend/src/pages/Admin/Workspaces/NewWorkspaceModal/index.jsx index 0667809a519..53f5c98d74a 100644 --- a/frontend/src/pages/Admin/Workspaces/NewWorkspaceModal/index.jsx +++ b/frontend/src/pages/Admin/Workspaces/NewWorkspaceModal/index.jsx @@ -1,9 +1,11 @@ import React, { useState } from "react"; import { X } from "@phosphor-icons/react"; import Admin from "@/models/admin"; +import { useTranslation } from "react-i18next"; export default function NewWorkspaceModal({ closeModal }) { const [error, setError] = useState(null); + const { t } = useTranslation(); const handleCreate = async (e) => { setError(null); e.preventDefault(); @@ -37,7 +39,7 @@ export default function NewWorkspaceModal({ closeModal }) { htmlFor="name" className="block mb-2 text-sm font-medium text-white" > - Workspace name + {t("common.workspaces-name")} { setHeader({ title: TITLE, description: DESCRIPTION }); @@ -71,7 +73,7 @@ export default function CreateWorkspace({ htmlFor="name" className="block mb-3 text-sm font-medium text-white" > - Workspace Name + {t("common.workspaces-name")}

- The number of previous chats that will be included in the - response's short-term memory. - Recommend 20. - Anything more than 45 is likely to lead to continuous chat failures - depending on message size. + {t("chat.history.desc-start")} + {t("chat.history.recommend")} + {t("chat.history.desc-end")}

@@ -22,7 +23,7 @@ export default function ChatModeSelection({ workspace, setHasChanges }) { }} className="transition-bg duration-200 px-6 py-1 text-md text-white/60 disabled:text-white bg-transparent disabled:bg-[#687280] rounded-md" > - Chat + {t("chat.mode.chat.title")}

{chatMode === "chat" ? ( <> - Chat will provide answers with the LLM's general knowledge{" "} - and document context that is - found. + {t("chat.mode.chat.title")} {t("chat.mode.chat.desc-start")}{" "} + {t("chat.mode.chat.and")} {t("chat.mode.chat.desc-end")} ) : ( <> - Query will provide answers{" "} - only if document context is - found. + {t("chat.mode.query.title")} {t("chat.mode.query.desc-start")}{" "} + {t("chat.mode.query.only")} {t("chat.mode.query.desc-end")} )}

diff --git a/frontend/src/pages/WorkspaceSettings/ChatSettings/ChatModelSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/ChatSettings/ChatModelSelection/index.jsx index 9ed42429498..71d943e5ed2 100644 --- a/frontend/src/pages/WorkspaceSettings/ChatSettings/ChatModelSelection/index.jsx +++ b/frontend/src/pages/WorkspaceSettings/ChatSettings/ChatModelSelection/index.jsx @@ -1,7 +1,7 @@ import useGetProviderModels, { DISABLED_PROVIDERS, } from "@/hooks/useGetProvidersModels"; - +import { useTranslation } from "react-i18next"; export default function ChatModelSelection({ provider, workspace, @@ -9,6 +9,7 @@ export default function ChatModelSelection({ }) { const { defaultModels, customModels, loading } = useGetProviderModels(provider); + const { t } = useTranslation(); if (DISABLED_PROVIDERS.includes(provider)) return null; if (loading) { @@ -16,11 +17,10 @@ export default function ChatModelSelection({

- The specific chat model that will be used for this workspace. If - empty, will use the system LLM preference. + {t("chat.model.description")}