// locales.js
const translations = {
    'zh-CN': {
        'chat': '对话',
        'mainModel': '主模型',
        'tools': '工具',
        'reasoner': '推理',
        'webSearch': '联网',
        'knowledgeBase': '知识库',
        'keyBox': '模型服务',
        'api': 'API',
        'systemSettings': '系统设置',
        'uploadFile': '上传文件',
        'uploadImage': '上传图片',
        'clickOrDrop': '点击或拖放文件到此区域',
        'knowledgeBaseSelection': '知识库选择',
        'noDescription': '暂无描述',
        'noKnowledgeBase': '暂无知识库，请先',
        'goToKnowledgeBase': '前往知识库页面',
        'add': '添加',
        'clearChat': '清空对话',
        'deepThinking': '深度思考',
        'deepResearch': '深度研究',
        'stopGenerating': '停止生成',
        'sendMessage': '发送消息',
        'inputMessage': '请输入消息，按下enter键发送，同时按下shift+enter键换行...',
        'addNewProvider': '添加新供应商',
        'apiAddress': 'API 地址',
        'apiAddressPlaceholder': 'API地址',
        'apiKey': 'API 密钥',
        'apiKeyPlaceholder': 'API密钥',
        'modelId': '模型 ID (可手动填写)',
        'modelIdPlaceholder': '点击放大镜可获取模型列表',
        'selectModel': '选择模型',
        'modelList': '模型列表',
        'provider': '供应商',
        'pleaseSelect': '请选择',
        'customURL': '自定义URL',
        'defaultConfigInfo': '默认配置说明',
        'autoFilledDefaultAPI': '已自动填充',
        'enterApiKeyThenClick': '填入API密钥之后点击右上角的放大镜，可以获得模型ID列表',
        'localConfigNotice': '当你选择Ollama、Vllm、LM studio、Xinference、Dify之后，需要注意修改URL到你实际的URL，Dify目前只支持聊天助手、Agent和Chatflow三种模式的接入！',
        'cancel': '取消',
        'confirm': '确认',
        'confirmAdd': '确认添加',
        'currentTime': '当前时间',
        'fakeInference': '伪推理',
        'latexRenderingEnhancement': 'latex公式渲染增强',
        'languageTone': '语言语气',
        'assistantLanguage': '助手语言',
        'assistantTone': '助手语气',
        'searchEngine': '搜索引擎',
        'webCrawling': '网页抓取',
        'webSearchTiming': '联网时机',
        'beforeThinking': '回复前必然触发',
        'afterThinking': '意图识别后触发',
        'both': '两者都',
        'resultCount': '结果数量',
        'baseURL': '基础URL',
        'searxngURLPlaceholder': 'SearXNG URL (docker部署后开放的端口)',
        'tavilyAPIKeyPlaceholder': 'tavily API 密钥',
        'jinaAPIKeyPlaceholder': 'jina API 密钥(可选)',
        'crawl4AiURLPlaceholder': 'Crawl4Ai url (docker部署后开放的端口)',
        'crawl4AiAPIKeyPlaceholder': 'Crawl4Ai API 密钥(可选)',
        'comfyuiAPIKey': 'comfyui API 密钥(可选)',
        'comfyuiConfigInfo': 'comfyui配置说明',
        'comfyuiConfigInfo1': 'comfyui API 密钥是可选的，如果你不需要调用comfyui内置的API节点，则无需填写。',
        'comfyuiConfigInfo2': '你可以填入多个服务器地址，agent party将会自动对这些服务器负载均衡。',
        'comfyuiConfigInfo3': '由于comfyui生成的图片均在本地，所以当公网上的机器人（如QQ机器人）上访问时，需要开启机器人页面的图床配置。',
        'comfyuiConfigInfo4': '如果你使用的是comfyui整合包，服务器地址默认在8188端口，如果是桌面版，则默认在8000端口。',
        'comfyuiConfigInfo5': '当你要将comfyui工作流上传为工具时，你必须打开comfyui的开发者模式，然后上传的工作流必须是api格式下载的工作流',
        'useSuperAPI': '使用OpenAI API',
        'modelName': '模型名称',
        'apiEndpoint': 'API 端点',
        'exampleLanguage': '示例语言',
        'copy': '复制',
        'basicConfiguration': '基础配置',
        'selectProvider': '选择供应商',
        'firstTimeUse': '首次使用，你可以在',
        'keyBoxInterface': '模型服务界面',
        'addProviderReturnSelect': '添加新的供应商。',
        'advancedConfiguration': '高级配置',
        'temperature': '温度',
        'outputLength': '输出长度',
        'stopWords': '停止词',
        'stopWordsPlaceholder': '输入并选择推理模型输出停止词，请不要将空格作为停止词',
        'conversationRounds': '对话轮数（为0时，则不会压缩记忆）',
        'files': '个文件',
        'model': '模型',
        'default': '默认',
        'segment': '分段',
        'segmentSize': '分段大小',
        'overlapSize': '重叠大小',
        'returnParagraphs': '返回段落数',
        'knowledgeBaseGenerating': '知识库生成中...',
        'addNewKnowledgeBase': '添加新知识库',
        'knowledgeBaseName': '知识库名称',
        'enterKnowledgeBaseName': '请输入知识库名称',
        'knowledgeBaseIntro': '知识库简介',
        'enterKnowledgeBaseIntro': '请输入知识库简介',
        'advancedSettings': '高级设置',
        'advancedSettingsClickExpand': '高级设置(点击展开)',
        'systemLanguage': '系统语言',
        'themeSettings': '主题设置',
        'launching': '正在启动...',
        'launchBrowserMode': '启动浏览器模式',
        'reasonerConfig': '推理配置',
        'createImmediately': '立即创建',
        'vendor.custom': '自定义OpenAI',
        'vendor.OpenAI': 'OpenAI',
        'vendor.Ollama': 'Ollama',
        'vendor.Vllm': 'Vllm',
        'vendor.xinference': 'Xinference',
        'vendor.Dify': 'Dify',
        'vendor.Deepseek': '深度求索',
        'vendor.Volcano': '火山引擎',
        'vendor.siliconflow': '硅基流动',
        'vendor.302.AI':'302.AI',
        'vendor.aliyun': '阿里云百炼',
        'vendor.ZhipuAI': '智谱AI',
        'vendor.moonshot': '月之暗面',
        'vendor.minimax': 'minimax',
        'vendor.LMstudio': 'LM studio',
        'vendor.Gemini': 'Google AI studio',
        'vendor.Anthropic': 'Anthropic',
        'vendor.Grok': 'Grok (xAI)',
        'vendor.mistral': 'mistral',
        'vendor.lingyi': '零一万物',
        'vendor.baichuan': '百川',
        'vendor.qianfan': '百度千帆',
        'vendor.hunyuan': '腾讯混元',
        'vendor.stepfun': '阶跃星辰',
        'vendor.o3': 'o3',
        'vendor.aihubmix': 'aihubmix',
        'vendor.ocoolai': 'ocoolai',
        'vendor.Github': 'Github',
        'vendor.dmxapi': 'dmxapi',
        'vendor.openrouter': 'openrouter',
        'vendor.together': 'together',
        'vendor.fireworks': 'fireworks',
        'vendor.360': '360智脑',
        'vendor.Nvidia': 'Nvidia',
        'vendor.hyperbolic': 'hyperbolic',
        'vendor.jina': 'jina',
        'vendor.gitee': 'gitee',
        'vendor.ppinfra': 'PPIO',
        'vendor.perplexity': 'perplexity',
        'vendor.infini': '无问芯穹',
        'vendor.modelscope': '魔搭',
        'vendor.tencent': '腾讯云',
        'theme.light': '浅色',
        'theme.dark': '深色',
        'theme.midnight': '午夜',
        'theme.desert': '沙漠',
        'theme.neon': '霓虹',
        'theme.marshmallow': '棉花糖',
        'theme.party': '派对',
        'theme.ink': '水墨',
        'copy_success': '已复制到剪贴板',
        'copy_fail': '复制失败',
        'settings_save_failed': '保存设置失败',
        'invalid_file': '无效文件',
        'file_upload_failed': '文件上传失败',
        'message.stopGenerate': '停止生成',
        'fetch_models_failed':'该供应商不支持模型列表获取或未填写API密钥',
        'vendor_required': '请选择供应商',
        'kb_created_successfully': '知识库创建成功',
        'kb_creation_failed': '知识库创建失败',
        'kb_deleted_successfully': '知识库删除成功',
        'kb_deletion_failed': '知识库删除失败',
        'kb_status_change_failed': '知识库状态更改失败',
        'kb': '知识库',
        'enabled': '启用',
        'disabled': '禁用',
        'browserUse': '浏览器控制',
        'browserConfiguration': '浏览器控制配置',
        'chromePath': 'Chrome路径',
        'chromePathPlaceholder': '请输入Chrome路径（可选）',
        'resetToDefault': '重置为默认',
        'browserConfigNotice': '添加供应商后，请返回此页面并选择【具有视觉能力的模型】以继续。视觉模型名称中一般包含“vision”、“image”、“img”等关键词。例如：qwen-vl等',
        'mainmodelnotice': '添加供应商后，请返回此页面并选择【具有工具能力和稳定JSON格式输出能力的模型】以继续。',
        'addresonerNotice': '添加供应商后，请返回此页面并选择【具有推理能力的模型】以继续。推理模型名称中一般包含“reasoner”、“R1”等关键词。例如：deepseek-r1等',
        'addemdNotice': '添加供应商后，请返回此页面并选择【词嵌入模型】以继续。词嵌入模型名称中一般包含“embedding”、“ebd”、“bge”等关键词。例如：bge-m3等',
        'autoUpdateSettingNote':'开启后智能体回复速度会变慢，如果你不选择任何词嵌入模型，则【长期记忆】将不启用。',
        "NoLongTermMemory": "不启用长期记忆",
        'usePlaywright':"是否使用Playwright",
        'mcpServers': 'MCP服务器',
        'addNewMCP': '添加MCP服务器',
        'mcpJsonFormat': 'JSON格式示例：',
        'mcpJsonInput': 'MCP配置（JSON格式）',
        'mcpJsonPlaceholder': '输入完整的MCP服务器配置，如果路径中包含`\\`字符，请将其修改为`/`或者`\\\\`...',
        'autoApprove': '自动审批',
        'confirmDeleteTitle': '确认删除',
        'confirmDeleteMCP': '确定要删除这个MCP服务器吗？',
        'mcpAddedSuccess': 'MCP服务器添加成功',
        'mcpAddFailed': 'MCP服务器添加失败',
        'invalidJsonFormat': '无效的JSON格式',
        'nvalidMCPConfig': '缺少必要字段（command/args）',
        'serverType': '服务器类型',
        'stdioServer': '标准输入输出',
        'sseServer': '服务器发送事件 (SSE)',
        'wsServer': 'WebSocket',
        'mcpAdded': 'MCP服务器添加成功',
        'invalidCommand': '无效的命令路径',
        'duplicateServer': '服务器名称已存在',
        'mcpServersManagement': 'MCP服务器管理',
        'manageMCPServers': '管理MCP服务器',
        'nomcpServers': '没有MCP服务器',
        'goTomcpServers': '去MCP服务器页面',
        'callingMethod': '调用方法',
        'modelService': '模型服务',
        'agents': '智能体',
        'agentSnapshot': '智能体快照',
        'addNewAgent': '添加智能体',
        'agentName': '智能体名称',
        'systemPrompt': '系统提示词',
        'editAgent': '编辑智能体',
        'addAgent': '添加智能体',
        'agentName': '智能体名称',
        'agentNamePlaceholder': '请输入智能体名称',
        'systemPrompt': '系统提示词',
        'systemPromptPlaceholder': '请输入系统提示词',
        'createAgent': '创建智能体',
        'copyAgentId': '复制智能体ID',
        'editAgent': '编辑智能体',
        'deleteAgent': '删除智能体',
        'agentInfo': '点击创建智能体后，会按照您当前的配置生成一个可调用的智能体，包含系统提示词、模型服务、知识库、工具、推理模型、MCP服务器、A2A服务器等配置。您可以点击左上角的标签复制智能体ID作为OpenAI API请求中的`model`参数，这将会调用对应的智能体。',
        'mainAgent': '主智能体',
        'toolAgents': '工具智能体',
        'agentSettings': '智能体设置',
        'noagents': '没有智能体',
        'goToagents': '去智能体页面',
        'defaultAgent': '默认智能体',
        'aboutUs': '关于我们',
        'a2aServers': 'A2A 服务器',
        'addA2AServer': '添加A2A服务器', 
        'a2aInitFailed': 'A2A初始化失败',
        'noA2AServersPrompt': '尚未添加任何A2A服务器，是否立即添加？',
        'noA2AServers': '没有可用的A2A服务器',
        'a2aServersManagement': 'A2A服务器管理',
        'goToAdd': '去添加',
        'A2AUrl': 'A2A服务器地址',
        'preview': '预览',
        'mermaidError': 'Mermaid语法错误',
        'useWebmode': '使用浏览器模式',
        'chatHistory': '聊天记录',
        'newChat': '新建对话',
        'messages': '条消息',
        'untitled': '未命名对话',
        'cannotDeleteActive': '不能删除当前对话',
        'noChatHistory': '没有聊天记录',
        'emptyConversationRemoved': '空对话已自动移除',
        'newVersion': '新版本可用',
        'updateAvailable': '发现新版本',
        'downloading': '新版本下载中',
        'installNow': '立即安装',
        'updateReady': '已准备好安装',
        'updateDownloaded': '下载完成，点击安装',
        'updateSuccess': '更新已完成',
        'llmTool': 'LLM 工具',
        'addLLMTool': '添加LLM工具',
        'toolName': '工具名称',
        'interfaceType': '接口类型',
        'description': '功能描述',
        'modelName': '模型名称',
        'toolNamePlaceholder': '例如：comfyui LLM party智能体',
        'selectInterfaceType': '选择接口类型',
        'descriptionPlaceholder': '用于智能体选择工具时参考...',
        'modelPlaceholder': '输入或选择模型',
        'llmTools': 'LLM 工具',
        'llmToolsManagement': 'LLM 工具管理',
        'noLLMTools': '未配置 LLM 工具',
        'gollmTools': '去添加 LLM 工具界面',
        'superAPIInstructions':'模型名称默认为super-model，此时调用的是当前配置中的智能体。如果需要调用其他智能体，模型名称请输入对应的智能体名称或者ID，可以在智能体快照查看并复制智能体名称或者ID。',
        'extraParams': "额外参数",
        'paramName': "参数名",
        'paramType': "类型",
        'paramValue': "值",
        'string': "字符串",
        'integer': "整数", 
        'float': "浮点数",
        'boolean': "布尔值",
        'save': "保存",
        'editSystemPrompt': '编辑系统提示词',
        'editMessage': '编辑消息',
        'enterContent': '请输入内容',
        'edit': '编辑',
        'defaultSystemPrompt': ' ',
        'system_prompt': '系统提示词：',
        'expand': '展开',
        'collapse': '收起',
        'delete': '删除',
        'reset': '重置',
        'selectOrTypeModel': '选择或输入模型名称',
        'expandAside': '展开侧边栏',
        'collapseAside': '收起侧边栏',
        'pollinationsImageGeneration': 'Pollinations 图像生成',
        'languagePlaceholder': '请输入语言',
        'tonePlaceholder': '请输入语气',
        'agentSuite': '智能体',
        'modelConfig': '模型',
        'modelService': '模型服务',
        'mainModel': '主模型',
        'reasonerModel': '推理模型',
        'toolkit': '工具',
        'apiGroup': '开发者',
        'openaiStyleAPI': 'OpenAI风格API',
        'browserMode': '浏览器模式',
        'file_type_error': '文件类型不支持',
        'image_type_error': '图片类型不支持',
        'uploadImage': '上传图片',
        'newChat': '新建对话',
        'visionModel': '视觉模型',
        'visionSettings': '视觉配置',
        'addVisionNotice': '添加供应商后，请返回此页面并选择【视觉模型】以继续。视觉模型名称中一般包含“vision”、“v”、“o”等关键词。例如：qwen-vl等',
        'vision': '视觉',
        'KBSearchTiming': '知识库搜索时机',
        'error_unknown': '未知错误',
        'kbSettings': '知识库配置',
        'addKnowledgeBase': '添加知识库',
        'embeddingProvider': '词嵌入模型供应商',
        'reasoningProvider': '推理模型供应商',
        'visionProvider': '视觉模型供应商',
        'gotoAPI': '获取API密钥或相关文档',
        'gotoGithub': '获取Github文档',
        'streamableHttpServer': '流式HTTP',
        'rerankEnable': '启用重排模型',
        'returnChunks': '返回结果数量',
        'rerankProvider': '重排模型供应商',
        'rerankmodelnotice': '添加供应商后，请返回此页面并选择【重排模型】以继续。重排模型名称中一般包含“rerank”、“rank”等关键词。目前供应商只支持jina和Vllm',
        'storage': '存储空间',
        'storageText': '文本文件',
        'storageImage': '图片文件',
        'storageVideo': '视频文件',
        'keywordSemanticWeight': '关键词↔️语义 搜索权重',
        'keyword': '关键词',
        'semantic': '语义',
        'MCPStyleAPI': 'MCP风格API',
        'MCPAPIInstructions':'可以在其他支持MCP的客户端中选择SSE协议，并填入对应的URL，即可使用',
        'docker': 'Docker',
        'dockerInstructions':'请确保您已经安装了Docker，并且Docker服务正在运行。你可以复制以下代码在终端中运行，以启动一个super agent party的本地服务。⭐注意！`./super-agent-data`可以替换为任意本地文件夹，docker启动后，所有数据都将缓存到该本地文件夹，不会上传到任何地方。',
        'fileDeleted': '文件已删除',
        'fileDeleteFailed': '文件删除失败',
        'noFiles': '存储空间中无文本文件',
        'noImages': '存储空间中无图片文件',
        'noVideos': '存储空间中无视频文件',
        'preview': '预览',
        'fileGet': '获取文件/图片URL中的内容',
        'memory': '酒馆角色卡',
        'addNewMemory': '添加新角色卡',
        'memoryConfig': '角色卡配置',
        'vectorProvider': '向量供应商', 
        'selectMemoryPlaceholder': '请选择角色卡',
        'selectMemory': '当前角色卡',
        'availableVoice':'可用角色音色',
        'memoryEnable': '启用角色卡',
        'memoryInterface':'角色卡页面',
        'memoryName': '角色卡名称',
        'worldviewSetting': '世界观设定',
        'settingName': '设定名称',
        'settingValue': '设定值',
        'basicCharacter': '角色设定',
        'inputBasicCharacter': '请输入角色设定',
        'interpreter': '代码工具',
        'codeEngine': '代码引擎',
        'e2bAPIKeyPlaceholder': '请输入E2B API Key',
        'networkSettings': '网络配置',
        'local': '仅本机可见',
        'global': '所有设备可见',
        'localVendor': '本地模型',
        'restartConfirmText': '是否确认重启以更新当前配置？',
        'restartConfirm': '重启确认',
        'confirmRestart': '确认重启',
        'memoryDeleted': '记忆已删除',
        'memoryDeleteFailed': '记忆删除失败',
        'AgentDeleted': '智能体已删除',
        'AgentDeleteFailed': '智能体删除失败',
        'sandboxURLPlaceholder': '请输入Sandbox Fusion URL',
        'addCustomHttpTool': '添加自定义HTTP工具',
        'editCustomHttpTool': '编辑自定义HTTP工具',
        'HTTPNamePlaceholder': '请输入工具名称，只能包含英文字符',
        'url': 'URL',
        'urlPlaceholder': '请输入工具的URL',
        'headers': '请求头',
        'headersPlaceholder': `{
    'Content-Type': 'application/json',
    'Authorization': 'Bearer YOUR_API_KEY_HERE'
}`,
      'body': '请求体',
      'bodyPlaceholder': `请输入请求体，使用JSON Schema格式。例如：
{
    "type": "object",
    "properties": {
        "name": {
            "type": "string",
            "description": "The name of the person",
            "default": "John Doe",
            "enum": ["John Doe", "Jane Doe"]
        },
        "age": {
            "type": "integer",
            "description": "The age of the person",
            "minimum": 0,
            "maximum": 120,
            "default": 30,
        }
    },
    "required": ["name", "age"]
}`,
        'bodyJsonSchemaFormat': '请求体应采用JSON Schema格式',
        'customHttpTool': 'HTTP工具',
        'HttpToolsManagement': 'HTTP工具管理',
        'noHttpTools': '暂无HTTP工具',
        'goHttpTools': '前往添加HTTP工具',
        'qqBot': 'QQ机器人',
        "qq_bot_config": "QQ机器人",
        "enter_qq_bot_app_id": "请输入QQ机器人App ID",
        "enter_qq_bot_secret": "请输入QQ机器人Secret",
        "start_bot": "启动机器人",
        "stop_bot": "停止机器人",
        "reload_bot": "重载机器人",
        'separators': '消息分隔符',
        'deployBot': '机器人',
        'qqBotConfig': 'QQ机器人配置',
        'selectExistingMemory': '从已有角色开始',
        'selectExistingMemoryPlaceholder': '请选择已有角色',
        'none': '无',
        'reasoningVisibleEnable': '推理过程可见',
        'text2imgModel': '文生图模型',
        'imgModel':'图像生成模型',
        'width': '宽度',
        'height': '高度',
        'openaiLike': '类OpenAI接口',
        'pollinations':'pollinations（免费，需联网，仅文生图）',
        'openaiImageLike': '类OpenAI图片生成接口（仅文生图）',
        'openaiChatLike':'类OpenAI对话完成接口（部分模型支持该兼容接口，例如：nano banana）',
        'imgModelProvider':'图像生成模型供应商',
        'addimgNotice':'添加供应商后，请返回此页面并选择【图像生成模型】以继续。图像生成模型名称中一般包含“img”、“image”等关键词。目前支持所有openai对话完成接口兼容的供应商。',
        'addText2imgNotice': '添加供应商后，请返回此页面并选择【文生图模型】以继续。文生图模型名称中一般包含“img”、“image”等关键词。目前支持所有openai图像生成接口兼容的供应商。',
        'addAsrNotice': '添加供应商后，请返回此页面并选择【语音识别模型】以继续。语音识别模型名称中一般包含“transcribe”、“speech”等关键词。目前支持所有openai兼容的供应商。',
        'text2imgModelProvider': '文生图模型供应商',
        'asrModelProvider': '语音识别模型供应商',
        'size': '尺寸',
        'sizeAllowCreate': '尺寸(允许手动输入)',
        'auto': '自动',
        'text2img': '文生图',
        'imgGen': '图像生成',
        'randomSetting': '随机设定',
        'worldviewSettingNote': '世界观设定：类似与lorebook，当用户提及或智能体上次对话提及某个设定名称时，智能体会在对话中看到设定值。',
        'basicCharacterNote': '角色设定：所有信息都会直接添加到系统提示中，智能体会根据角色设定进行对话。',
        'randomSettingNote': '随机设定：开启一个新的对话后，一个随机设定会被添加到系统提示中，智能体会根据随机设定进行对话。',
        'text2imgEngine': '文生图接口',
        'imgEngine': '图像生成接口',
        'autoUpdateSetting': '长期记忆',
        'autoUpdateNote': '长期记忆：会跟随对话内容，动态更新一个记忆库。对话时，会根据用户的提问，将相关的记忆返回给智能体。',
        'quickRestartEnable': '启用`/重启`快捷指令',
        'bot_config': '机器人通用配置',
        'imgHost': '图床/文件床',
        'imgHostType': '图床/文件床类型',
        'mcpDeleted': 'MCP已删除',
        'mcpDeleteFailed': 'MCP删除失败',
        'gotoEasyImage2Github': '获取EasyImage2 Github文档',
        'collapseInput': '收起输入框',
        'expandInput': '展开输入框',
        'googleCSEIdPlaceholder':'请输入Google Custom Search Engine ID',
        'searchEndpoint': '搜索接口URL',
        'gotoQQbot': '前往QQ开放平台',
        'comfyuiServers': 'ComfyUI服务器',
        'addServer': '添加服务器',
        'server': '服务器',
        'noServerTip': '未连接服务器',
        'comfyuiConfig': 'ComfyUI配置',
        'comfyuiDisplay': 'ComfyUI显示',
        'connectServer': '连接服务器',
        'connectComfyUIServer': '连接ComfyUI服务器',
        'removeComfyUIServer': '移除ComfyUI服务器',
        'comfyuiWorkflowTool': 'ComfyUI 工作流转工具',
        'ComfyUIManagement': 'ComfyUI管理',
        'noWorkflows': '暂无工作流',
        'goComfyui': '前往添加工作流',
        'uploadWorkflowJson': '上传工作流 JSON',
        'textInput': '文本输入',
        'selectTextInput': '请选择文本输入',
        'imageInput': '图片输入',
        'selectImageInput': '请选择图片输入',
        'seedInput': '随机种子输入',
        'selectSeedInput': '请选择随机种子输入',
        'comfyuiWorkflowInfo': '尽可能地在第一个输入框中描述这个工作流的功能，以及每个输入的作用，例如：第一个文字输入为正向图片提示词，第二个文字输入为反向图片提示词，第三个图片输入为需要修改的图片，第四个图片输入是需要参考的风格图片。不是每一个文字输入和图片输入都需要填，没有填充的输入项会被忽略。',
        'repo_owner': '仓库拥有者',
        'repo_owner_placeholder': '请输入仓库拥有者',
        'repo_name': '仓库名称',
        'repo_name_placeholder': '请输入仓库名称',
        'branch': '分支',
        'branch_placeholder': '请输入分支',
        'token': '私人令牌',
        'token_placeholder': '请输入私人令牌',
        'stickerPacks': '表情包',
        'createStickerPack': '创建表情包',
        'packName': '表情包/图片库名称',
        'uploadStickers': '上传表情/图片',
        'addTags': '添加标签',
        'addStickerPack': '添加表情包/图片库',
        'sticker': '表情',
        'imageDescription': '图片描述',
        'enterDescriptionPlaceholder': '请输入图片描述',
        'sticker/image': '表情包/图片库',
        'utilityTools': '实用工具',
        'images': '张图片',
        'stickerPackCreated': '表情包/图片库已创建',
        'createFailed': '创建失败',
        'stickerPackDeleted': '表情包/图片库已删除',
        'StickerPackManagement': '表情包/图片库管理',
        'noStickerPack': '暂无表情包/图片库',
        'goStickerPack': '前往添加表情包/图片库',
        'clickOrDropJson': '点击或拖拽JSON文件至此处',
        'clickOrDropCharacterCards': '点击或拖拽JSON/PNG格式的角色卡至此处',
        'noResults': '暂无结果',
        "searchChatHistoryPlaceholder": "搜索对话历史",
        'fastAPIDocs': 'FastAPI文档',
        'asyncTools': '异步工具',
        'asyncToolsNotice': '工具调用时，不会阻塞对话，工具结果将在生成后加入到对话中。注意！只与流式调用API有关，非流式调用API不会生效。',
        'currentTimeNotice': '对话时会自动返回当前时间和时区信息，或者主动查询指定时区当前时间',
        'fileGetNotice': '智能体将获得查询文件或图片URL的能力。获取图片URL时，主模型需要有视觉能力或者视觉模型被正确配置并启用。',
        'deepResearchNotice': '开启深度研究模式，智能体将动态规划并调用工具以解决复杂问题，最好配合联网或者本地知识库使用',
        'fakeInferenceNotice': '开启伪推理模式，让没有推理能力的模型也能推理',
        'latexRenderingEnhancementNotice': '让智能体能够更稳定地输出LaTeX公式',
        'languageNotice': '智能体将使用指定语言和语气进行对话',
        'asrModel': '语音识别模型',
        'asrEngine': '语音识别接口',
        'asr': '语音识别',
        'funasrURL': 'FunASR URL',
        'funasrURLPlaceholder': '请输入FunASR URL',
        'funasrNotice1': '1. 请在docker中启动FunASR服务，需要在终端中输入：` docker run -d -p 10095:10095 -p 10096:10096 harryliu888/funasr-online-server:latest `',
        'funasrNotice2': '2. 热词一行一个，每行按词语+空格+权重的方式填入',
        'mode': '模式',
        'online': '实时',
        'offline': '非实时',
        'interactionMethod': '交互方式',
        'auto':'自动',
        'manual': '手动',
        'wakeWord': '唤醒词',
        'endWordMode': '结束词: 开启唤醒词与结束词模式后可用，该模式下，唤醒状态不会自动睡眠，需要带结束词的消息会进入睡眠状态',
        'endWordPlaceholder': '请输入结束词',
        'endWordDetected': '进入睡眠',
        'wakeWordAndEndWord': '唤醒词与结束词',
        'wakeWordMode': '唤醒词：开启唤醒词模式后可用，该模式下，检测到唤醒词时，才会进入唤醒状态，并在30秒后进入睡眠状态',
        'wakeWordPlaceholder': '请输入唤醒词',
        'hotkey': '快捷键：按下时录音，松开时自动发送',
        'Space':'空格',
        'hotwords': '热词',
        'hotwordsPlaceholder': '请输入热词',
        'ttsModel': '语音合成模型',
        'asrReady': '语音识别已准备就绪',
        'ttsEngine': '语音合成接口',
        'tts': '语音合成',
        'pause': '暂停',
        'play': '播放',
        'initializing': '初始化中',
        'backward': '向后',
        'forward': '向前',
        'edgettsLanguage': 'EdgeTTS语言',
        'edgettsGender': 'EdgeTTS性别',
        'edgettsVoice': 'EdgeTTS声音',
        'enabledInterruption': '是否允许被打断（开启后，如果麦克风离扬声器太近，会导致模型的语音自己打断自己）',
        'edgettsRate': 'EdgeTTS语速',
        'maxConcurrency': '语音合成最大并发数',
        "zh":"中文",
        "en":"英文",
        "ja":"日语",
        "ko":"韩语",
        "yue" :"粤语",
        "auto": "自动",
        "auto_yue": "自动(粤语)",
        "gsvRate": "语速",
        "gsvSample_steps": "采样步数",
        'gsvTextLang': '目标语言',
        'gsvPromptLang': '参考音频语言',
        'gsvPromptAudio': '参考音色',
        'uploadGsvRefAudio': '上传参考音色',
        'gsvPromptText': '参考音频文本',
        'gsvPromptTextPlaceholder': '请输入参考音频中对应的文本',
        'addRefAudio': '添加参考音色',
        'gsvNotice1': '1. 请点击上方文档链接，下载GSV-V4版本的整合包，推荐使用v2版本',
        'gsvNotice2': '2. 请在整合包项目路径下打开终端，执行` runtime/python.exe api_v2.py -a 127.0.0.1 -p 9880 -c GPT_SoVITS/configs/tts_infer.yaml `启动服务',
        'gsvServer': 'GSV服务器URL(可以输入多个，一行一个)',
        'gsvServerPlaceholder': '请输入GSV服务器URL',
        'Male': '男',
        'Female': '女',
        'tablePet': 'VRM桌宠机器人',
        'table_pet_config': 'VRM桌宠配置',
        'gotoVroid': '在Vroid Studio中创建VRM',
        'start_table_pet': '启动VRM桌宠',
        'stop_table_pet': '停止VRM桌宠',
        'reload_table_pet': '重载VRM桌宠',
        'start_table_pet_web': '启动VRM桌宠(网页)',
        'WebSocketConnected':'WebSocket已连接 - 点击断开',
        'WebSocketDisconnected':'WebSocket已断开 - 点击连接',
        'dragWindow': '拖拽以移动窗口',
        'refreshWindow': '刷新窗口',
        'closeWindow': '关闭窗口',
        'enabledExpressions': '启用表情',
        "vrmModel": "VRM模型",
        "addVrmModel": "添加VRM模型",
        "uploadVrmModel": "上传VRM模型",
        "clickOrDropVrm": "点击或拖拽.vrm文件到此处",
        "modelDisplayName": "模型显示名称",
        "modelDisplayNamePlaceholder": "请输入模型的显示名称",
        "vrmNotice1": "1. 请上传.vrm格式的3D模型文件；你也可以上传.vrma格式的动画文件让模型有更多的闲置动作！",
        "vrmNotice2": "2. 鼠标左键旋转，滚轮缩放，右键平移，右上角第一个按钮拖拽整体窗口",
        'vrmNotice3': '3. 如果你希望录制透明背景的数字人口播视频或者在直播时录制桌宠，请在录制软件的浏览器源中添加`http://127.0.0.1:3456/vrm.html`作为视频源',
        'Previous': '上一个',
        'Next': '下一个',
        'gotoVroidHub': 'Vroid Hub获取VRM',
        'webSpeech': 'Web Speech API(免费，但仅浏览器模式下可用)',
        'webSpeechStarted': 'Web Speech API已启动',
        'webSpeechNotSupportedInElectron': '在Electron中 Web Speech API 不支持，自动开启浏览器模式中...',
        'webSpeechNotice1': '以Web Speech API引擎启动语音识别，将自动跳转到浏览器模式',
        'windowWidth': '窗口宽度',
        'windowHeight': '窗口高度',
        'live_stream': '直播',
        'live_stream_bot': '直播机器人',
        'bilibili': 'B站',
        'roomID': '房间号',
        'roomIDPlaceholder': '请输入房间号',
        'sessdata': 'SESSDATA',
        'sessdataPlaceholder': '请输入SESSDATA',
        'web':'Web端',
        'bilibili_open_live': 'B站直播开放平台',
        'liveType': '直播类型',
        'liveTypePlaceholder': '请选择直播类型',
        'ACCESS_KEY_ID_Placeholder': '请输入Access Key ID',
        'ACCESS_KEY_SECRET_Placeholder': '请输入Access Key Secret',
        'APP_ID_Placeholder': '请输入APP ID',
        'ROOM_OWNER_AUTH_CODE_Placeholder': '请输入Room Owner Auth Code',
        'start_live': '开始直播',
        'stop_live': '停止直播',
        'reload_live': '重载直播配置',
        'liveSetting': '直播设置',
        'onlyDanmaku': '仅回复弹幕',
        'danmakuQueueLimit': '弹幕队列上限',
        'gotoBilibiliOpenLive': '前往B站直播开放平台',
        'wxBot': '微信机器人',
        'wx_bot_config' : '微信机器人配置',
        'nickName': '昵称',
        'nickNamePlaceholder': '请手动输入昵称，然后选择这个昵称，可以输入多个昵称，也可以是群聊名称',
        'windowsOnly': '仅限Windows',
        'groupWakeWord': '群聊唤醒词',
        'triggerMode': '触发模式',
        'accuweather': 'AccuWeather天气',
        'weatherNotice': '可以获取指定城市的经纬度、实时或预测天气。支持返回天气预测图',
        'openMeteoWeather': 'OpenMeteo天气',
        'wikipedia': '维基百科',
        'wikipediaNotice': '获取指定关键词的维基百科摘要或具体章节内容',
        'toolMemorandum': '工具备忘录',
        'toolMemorandumNotice': '开启时，会将工具调用结果添加到聊天记录中，关闭后，则不会添加，节省token消耗',
        'arxiv': 'arXiv',
        'arxivNotice': '从arXiv获取最新的论文摘要',
        'gotoAplaybox': '模之屋获取VRM',
        'briefly': '简要',
        'rewrite': '重写',
        'proxySettings': '代理设置',
        'proxyPlaceholder': '请输入代理地址，例如：http://127.0.0.1:7890 或 http://127.0.0.1:10809',
        'proxyNotice': '如果跟随系统代理没有生效，可以尝试手动代理。手动代理需要输入HTTP代理地址，例如：http://127.0.0.1:7890 或 http://127.0.0.1:10809',
        'wxNotice': '启动机器人之前，你必须先登录微信，并且保证微信窗口在屏幕上。agent party将模拟用户行为接管你的微信。',
        'language': '语言',
        'webSpeechAPI': 'Web Speech API',
        'openUserfile': '打开数据文件夹',
        'openLogfile': '打开日志文件夹',
        'userfileNotice':'删除数据文件夹中所有文件可以将软件恢复到初始状态',
        'logfileNotice':'你可以查看日志文件夹中最新的两个log文件以获取软件运行日志', 
        'openExtfile': '打开扩展文件夹',
        'SubtitleEnabled': '字幕已开启',
        'SubtitleDisabled': '字幕已关闭',
        'bufferWord': '缓冲词',
        'bufferWordPlaceholder': '请输入缓冲词，可以让语音合成显得更快，例如：嗯，好的',
        'UsingVRMAAnimations': '当前使用VRM模型动画',
        'UsingProceduralAnimations': '当前使用程序化动画中',
        'clickToUse': '1. 桌面端点击按钮启动浏览器模式',
        'scanToUse': '2. 手机浏览器扫码快速使用浏览器模式，扫码前，需要在系统设置中将网络设置为所有设备可见，且手机和电脑在同一个局域网内',
        'low': '低',
        'medium': '中',
        'high': '高',
        'reasoningEffort': '推理强度（只有部分模型支持，不支持的模型请选自动）',
        'TTSModelProvider': '语音合成模型服务商',
        'TTSVoice': '音色(如果列表中没有可以手动添加)',
        'TTSRate': '语速',
        'addTTSNotice': '添加供应商后，请返回此页面并选择【语音合成模型】以继续。语音合成模型名称中一般包含“tts”等关键词。目前支持的供应商：openai、硅基流动等。注意！音色需要与供应商的模型匹配，否则无法使用，默认音色列表中仅包含了openai的音色，其他供应商的音色需要手动添加。参考音色暂时只支持硅基流动，其他供应商请不要选择参考音色！',
        'noRefAudio': '不使用参考音频',
        'error_start_HA': '启动Home Assistant失败，请检查配置文件是否正确',
        'error_stop_HA': '停止Home Assistant失败',
        'homeAssistant': 'Home Assistant',
        'success_start_HA': 'Home Assistant启动成功',
        'success_stop_HA': 'Home Assistant停止成功',
        'HANotice1': '1. 安装Home Assistant',
        'HANotice2': '2. 在Home Assistant的设置-设备与集成-添加集成，然后搜索MCP，添加Model Context Protocol Server',
        'HANotice3': '3. 基础URL默认为`http://localhost:8123`，你需要改成`http://<你的IP>:8123`',
        'HANotice4': '4. 点击Home Assistant左下角的用户头像，再点击安全，在最下方添加长期访问令牌，将令牌复制到API密钥中',
        'browserControl': '浏览器控制',
        'browserNotice1': '1. 点击上方的链接，安装浏览器扩展',
        'browserNotice2': '2. 你的电脑需要安装node环境，可以从 `https://nodejs.org/en/download` 下载安装',
        'browserNotice3': '3. 在新扩展页面点击连接，然后在本页面启动浏览器控制',
        'gotoBrowserExtension': '前往浏览器扩展',
        'success_start_browserControl': '浏览器控制启动成功',
        'error_start_browserControl': '浏览器控制启动失败，请检查配置文件是否正确',
        'error_stop_browserControl': '浏览器控制停止失败',
        'success_stop_browserControl': '浏览器控制停止成功',
        'selectVrmModel': '选择VRM模型',
        'defaultModels': '默认模型',
        'userModels': '用户模型',
        'selectVrmaMotions': '选择VRM模型动画',
        'defaultMotions': '默认动画',
        'userMotions': '用户动画',
        'addVrmaMotion': '添加VRM模型动画',
        'uploadVrmaMotion': '上传VRM模型动画',
        'clickOrDropVrma': '点击或拖拽VRM模型动画到此处',
        'motionDisplayName': '动画名称',
        'motionDisplayNamePlaceholder': '请输入动画名称',
        'characterDescription': '角色描述',
        'characterDescriptionPlaceholder': '请输入角色描述',
        'personality': '性格',
        'personalityPlaceholder': '请输入性格',
        'mesExample': '对话示例',
        'mesExamplePlaceholder': `请输入对话示例，例如：
{{user}}: 你好
{{char}}: 你好哇`,
        'systemPromptPlaceholder': '请输入系统提示，例如：总是使用用户使用的语言与之交流',
        'characterBook': '角色设定书',
        'keysPlaceholder': `key1
key2
key3`,
        'contentPlaceholder': '请输入当提及相关关键词，会添加到上下文中的相关内容',
        "firstGreeting": "开场白",
        'firstMes': '开场白',
        'alternateGreeting': '额外开场白',
        'keys': '关键词(每行一个)',
        'content': '内容',
        'userName': '用户名',
        'userNamePlaceholder': '请输入用户名，这决定了角色如何称呼你',
        'genericSystemPrompt': '通用系统提示',
        'genericSystemPromptPlaceholder': '请输入通用系统提示，例如：总是使用用户使用的语言与之交流',
        'avatar': '头像',
        'avatarPlaceholder': '请输入头像链接或者本地文件绝对路径，例如：https://example.com/avatar.png 或 /Users/username/avatar.png 或 C:\\Users\\username\\avatar.png',
        'translate': '翻译',
        'translating': '翻译中',
        'is_sandbox': '是否开启沙盒模式（不限制IP，但只有你和你的测试群可见，沙盒最多一次回复4条消息）',
        'confirmClearAllHistory': '确定要清空所有历史记录吗？',
        'warning': '警告',
        'clearSuccess': '清空成功',
        'confirmKeepLastWeek': '确定要只保留最近一周的历史记录吗？',
        'keepLastWeek': '保留最近一周历史记录',
        'clearAllHistory': '清空所有历史记录',
        'getCardLink': '在哪里获取角色卡？',
        'memoryResultCount': '长期记忆结果数量',
        'customTTS': '自定义TTS（开源的TTS整合包绝大部分都是这个接口，例如cosyVoice/indexTTS等）',
        'customTTSserver': '自定义TTS服务器地址(可以输入多个，一行一个)',
        'customTTSServerPlaceholder': '请输入自定义TTS服务器地址',
        'customTTSspeaker': '自定义TTS音色',
        'customTTSspeakerPlaceholder': '请输入自定义TTS音色',
        'customTTSspeed': '自定义TTS语速',
        "customTTSKeyMapping": "API 参数键名映射 (高级)",
        "key_text": "文本键名 (默认: text)",
        "key_speaker": "音色键名 (默认: speaker)",
        "key_speed": "语速键名 (默认: speed)",
        'gsvGsvAudioPath': '参考音频路径（如果上传了文件可不填）',
        'gsvGsvAudioPathPlaceholder': '请输入参考音频路径',
        'selectAll': '全选',
        'batchDelete': '批量删除',
        'batchDeleteSuccess': '批量删除成功',
        'batchDeleteFailed': '批量删除失败',
        'readBot': '播报机器人',
        'start_read': '开始阅读',
        'stop_read': '停止阅读',
        'longTextPlaceholder': '请输入需要被阅读的长文本，配合桌宠机器人可以实现数字人口播功能。你可以使用<音色名></音色名>将使用对应音色括起来，实现多音色朗读。如果你导入的是epub格式的文件，你可以点击右上角的按钮切换章节。如果你想要快速转换音频，请选择较少的分隔符，例如仅选择换行符，如果你想要让数字人口播或者多语音朗读，请选择更多的分隔符，例如逗号和句号。',
        'selectFile': '选择文件',
        "parseFileContent": "解析文件内容",
        "clearText": "清空文本",
        'ttsNotEnabled': '语音合成未启用',
        'ttsAutoEnabled': '语音合成已自动启用',
        'getAPIkey': '获取API密钥或者说明文档',
        'getModelsList': '获取模型列表',
        "defaultTTS": "默认语音合成",
        "addNewTTS": "新增语音合成",
        "ttsName": "音色名（可以选择，也可以手动填写）",
        'LockWindow':'窗口已解锁（点击锁定）',
        'UnlockWindow':'窗口已锁定（点击解锁）',
        'gotoComfyui':'如果想使用comfyui,请点击跳转',
        'ComfyuiInterface':'comfyui界面',
        'text2imgNotice':'默认使用免费的Pollinations，你可以切换到类openai接口，以使用其他模型。',
        'translateAndMark':'翻译并标注音色',
        "Narrator":"旁白",
        'CharacterMemory':'角色记忆',
        'role':'角色',
        'CharacterVoice':'角色语音',
        'CharacterAppearance':'角色外观',
        "noNet":' (免费！但中国网络环境无法访问，且不稳定！)',
        'addNewAppearance':'新增角色外观',
        'AppearanceName':'外观名称',
        'localConfigNotice02':'点击钥匙按钮，可以跳转到API key的获取网页或者github相关文档',
        'localConfigNotice03':'点击放大镜按钮，可以获取模型列表，如果没有获取到，可能是该供应商不支持获取模型列表（例如火山引擎），或者API key不正确',
        'modelConfigDescription':'模型配置说明',
        'vrmAPI':'VRM桌宠API',
        'vrmAPIInstructions':'将下方的url复制到录屏软件，如：OBS 的浏览器源中，即可将桌宠透明地嵌入到你的视频里',
        'downloadAudio':'下载音频',
        'convertAudioOnly':'仅转换音频',
        'contentTooLong':'内容太长，请分段发送，已截取前一万个字符',
        'stop_all_tts':'停止所有语音合成',
        "processingProgress": "处理进度",
        "waiting": "准备中...",
        'goToMemoryNotice1':"角色卡是可选项，如果需要使用角色卡，请点击跳转到",
        'copyProvider': '复制供应商',
        'goToMainAgentNotice1':"当你选择某一个智能体快照时，所有的配置将锁定到创建智能体快照时的配置，直到你切换回默认智能体，如需创建智能体快照，请点击跳转到",
        'MainAgentInterface':'智能体快照界面',
        'prevPage': '上一页',
        'nextChapter': '下一章',
        'prevChapter': '上一章',
        'audioConversionStopped': '音频转换已停止',
        'audioMergeFailed': '音频合并失败',
        'audioDownloadStarted': '音频下载已开始',
        'audioDownloadFailed': '音频下载失败',
        'noValidAudioChunks': '没有有效的音频块',
        'noAudioToDownload': '没有音频可以下载',
        'TTSInterface': '语音合成界面',
        'SeparatorNotice': '1. 播报时如果与AI语音交互，播报会自动暂停，直到回复结束时，播报会自动恢复',
        'SeparatorNotice1': '2. 如果想要调整语音合成配置，点击跳转',
        'audioConversionCompleted': '音频转换已完成',
        'CharacterBehavior':'角色行为',
        'AutoBehavior':'自主行为',
        'addNewBehavior':'添加新的行为',
        'behaviorConfig':'行为配置',
        'longTermMemoryNote':'长期记忆是可选项，长期记忆结果数量会控制每次模型能够看到的最大相关记忆数量，如果需要使用长时记忆，请在【添加新角色卡】界面添加角色卡，并在长期记忆选项卡中启用',
        'addBehavior': '添加行为',
        'behavior': '行为',
        'triggerType': '触发类型',
        'time': '时间',
        'noInput': '无输入',
        'selectTime': '选择时间',
        'repeatDays': '重复周期（不选择则为不重复）',
        'monday': '周一',
        'tuesday': '周二',
        'wednesday': '周三',
        'thursday': '周四',
        'friday': '周五',
        'saturday': '周六',
        'sunday': '周日',
        'noInputLatency': '闲置延迟（单位：秒）',
        'actionType': '执行类型',
        'prompt': '提示词',
        'promptPlaceholder': '请输入提示词内容，例如：`请随便找个话题，说点什么，让用户愿意与你聊天`',
        'removeBehavior': '删除行为',
        'promptAction': '提示词（动作触发时，会将以下提示词发给模型，令其作出反应）',
        "noInputName": "闲置行为",
        "timeName": "定时行为",
        "cycleName": "周期行为",
        'allNoBriefly': '显示推理和工具信息',
        'allBriefly': '自动收起推理和工具信息',
        'enableAutoBehavior': '启用自主行为',
        'deleteBehaviorSuccess': '删除行为成功',
        'randomEvent': '随机事件',
        'eventType': '事件类型',
        'random': '随机',
        'order': '顺序',
        'triggerNewRandomEvent1': '触发了随机事件：【',
        'triggerNewRandomEvent2': '】请根据事件继续对话。',
        'cycleValue': '周期值',
        'repeatNumber': '重复次数',
        'isInfiniteLoop': '是否无限循环',
        'addNewBehaviorChain': '添加新的行为链（施工中🚧）',
        'autoBehaviorTool': '自主行为工具',
        'enableAutoBehaviorTool': '启用自主行为工具',
        'autoBehaviorToolNotice': '自主行为工具允许模型添加自动化行为，例如提醒或定时操作。行为可以是定时的（如闹钟）、闲置触发的（在无活动后启动）或周期性的（按间隔重复）。',
        'deleteAllBehaviorSuccess': '删除所有行为成功',
        'removeAllBehavior': '删除所有行为',
        'resetBehavior': '重置行为',
        'minimal': '最小化',
        'none': '无',
        'modifyAndSend': '修改并发送',
        'next': '下一步',
        'prev': '上一步',
        'done': '完成',
        'guide.model-config': '请点击【模型】',
        'guide.model-config-notice': '点击【模型】界面，配置你的第一个模型',
        'guide.add-provider-card': '点击【添加新供应商】',
        'guide.add-provider-card-notice': '点击【添加新供应商】按钮，添加你的第一个供应商',
        'guide.show-Add-Dialog':'选择供应商',
        'guide.show-Add-Dialog-notice':'选择任意一个供应商，其对应的供应商URL将被自动填充。',
        'guide.confirm-Add-Provider-Button':'点击确认',
        'guide.get-API-key':'点击钥匙按钮',
        'guide.get-API-key-notice':'点击钥匙按钮，可以跳转到API key的获取网页或者github相关文档',
        'guide.get-Models-List':'点击放大镜按钮',
        'guide.get-Models-List-notice':'点击放大镜按钮，可以获取模型列表，如果没有获取到，可能是该供应商不支持获取模型列表（例如火山引擎），或者API key不正确',
        'guide.model-Id':'选择模型',
        'guide.model-Id-notice':'选择一个模型，也可以手动填写模型ID',
        'guide.input-api-Key':'填写API key',
        'guide.input-api-Key-notice':'本地推理供应商如：ollama/vllm等不需要填写API key，其他供应商需要填写API key',
        'guide.driver-guide-btn':'欢迎使用super agent party！',
        'guide.driver-guide-btn-notice':'点击这个按钮会重新开启教程，点击下一步查看教程！不推荐将本服务暴露到公网，这存在API key泄露的风险！',
        'mcpCreationFailed': 'MCP创建失败，已自动禁用，请点击重连按钮或者修改后重试',
        'inputMethod': '输入方式',
        'jsonInput': 'JSON输入',
        'formInput': '表单输入',
        'mcpName': 'MCP名称',
        'mcpURL': 'MCP URL',
        'mcpApiKey': 'MCP API Key',
        'mcpCommand': 'MCP命令',
        'mcpArgs': 'MCP参数(每个参数占一行)',
        'mcpEnv': 'MCP环境变量(格式：key=value，每个变量占一行)',
        'mcpApiKeyPlaceholder': '输入MCP的API Key，没有可不填',
        'mcpCommandPlaceholder': '输入MCP的命令，例如：uvx 或者 npx',
        'mcpArgsPlaceholder': 'arg1\narg2\narg3',
        'mcpEnvPlaceholder': 'key1=value1\nkey2=value2\nkey3=value3',
        'moreMCP': '更多MCP',
        'MCPvendor.MCP': 'MCP官方合集',
        'MCPvendor.awesome': 'Awesome MCP仓库',
        'MCPvendor.docker': 'Docker MCP仓库',
        // FIXME: duplicate key — 'tools' is already defined near the top of the 'zh-CN'
        // object as '工具' (the Tools menu label). In a JS object literal the last
        // definition wins, so this entry ('个工具', a "N tools" count suffix) silently
        // overrides the menu label. Consider renaming this key (e.g. 'toolsCount') and
        // updating its call sites.
        'tools': '个工具',
        'clickToolInfo': '点击工具tag查看详细信息',
        'vmcSettings': 'VMC 协议设置',
        'vmcReceiveEnable': '启用接收（UDP）',
        'vmcReceivePort': '接收端口',
        'vmcSendEnable': '启用发送（UDP）',
        'vmcSendHost': '目标主机',
        'vmcSendPort': '目标端口',
        'syncExpression': '表情同步',
        'vmcp': 'VMC协议',
        'vmcpInstructions':'VMC（Virtual Motion Capture）协议 是一个基于UDP网络的、开放式的通信协议。它的核心目的是将人体动作捕捉数据从一台设备（发送端）实时传输到另一台设备或软件（接收端），从而实现虚拟角色（如VTuber虚拟形象）的实时驱动。agent party支持双向VMC协议，接收端的默认端口在39539，发送端的默认端口在39540。你可以在桌宠界面修改。',
        'vmcpReceive':'VMC接收端',
        'vmcpSend':'VMC发送端',
        'SampleText': '示例文本',
        'SampleTextPlaceholder': '请输入示例文本用于测试音色',
        'ClickToListen': '点击播放',
        'total_tokens': '总token',
        'first_token_latency': '首token延迟',
        'elapsedTime': '耗时',
        'EnterXR': '进入XR',
        'first_sentence_latency': '首句延迟',
        'TTSelapsedTime': 'TTS耗时',
        'moreButton': '更多',
        'brieflyButton': '隐藏推理和工具信息',
        'expandButton': '展开输入框',
        'fileButton': '上传文件',
        'imageButton': '上传图像',
        'reasonerButton': '推理',
        'deepSearchButton': '深度研究',
        'visionButton': '视觉',
        'desktopVisionButton': '桌面视觉',
        'text2imgButton': '文生图',
        'asrButton': '语音识别',
        'ttsButton': '文本转语音',
        'knowledgeBaseButton': '知识库',
        'webSearchButton': '网页搜索',
        'memoryButton': '角色卡',
        'codeButton': '代码工具',
        'stickerButton': '表情包',
        'haButton': '智能家居',
        'chromeButton': '浏览器控制',
        'agentButton': '智能体工具',
        'llmButton': 'LLM工具',
        'mcpButton': 'MCP',
        'a2aButton': 'A2A',
        'httpButton': 'HTTP工具',
        'comfyuiButton': 'ComfyUI工具',
        'vrmButton': 'VRM桌宠',
        'behaviorBotton': '自主行为',
        'showMoreButton': '显示更多按钮',
        'desktopVision': '桌面视觉（需要主模型具有视觉能力或启用视觉模型）',
        'maximize': '最大化',
        'minimize': '最小化',
        'close': '关闭',
        'assistantMode': '助手模式',
        'tutorial': '教程',
        'screenshot': '截图',
        'screenshotButton': '截图',
        'fixedWindow': '固定窗口',
        'unfixedWindow': '解除固定窗口',
        'CapsuleMode': '胶囊模式',
        'keyTriggered': '按键触发',
        'listening': '聆听中',
        'typing': '打字中',
        'speaking': '说话中',
        'idle': '空闲中',
        'ExitFirstPerson': '退出第一人称',
        'EnterFirstPerson': '进入第一人称(WASD+QE，ESC退出)',
        'addSystemPrompt': '添加系统提示词',
        'editSystemPrompt': '编辑系统提示词',
        'promptName': '提示词名称',
        'promptContent': '提示词内容',
        'usePrompt': '使用提示词',
        'moreSystemPrompt': '更多系统提示词',
        'prompt.awesome': 'awesome-chatgpt-prompts',
        'prompt.aiTool': 'system-prompts-and-models-of-ai-tools',
        'prompt.leaked': 'GPTs',
        'moreCard': '更多角色卡',
        'card.chub': 'Chub',
        'card.janitorai': 'JanitorAI',
        'card.pygmalion': 'Pygmalion',
        'selectGaussScene': '选择Gauss场景',
        'defaultScenes': '默认场景',
        'userScenes': '用户场景',
        'transparentBackground': '透明背景',
        'addGaussScene': '添加Gauss场景',
        'uploadGaussScene': '上传Gauss场景',
        'clickOrDropGaussScene': '点击或拖拽Gauss场景到此处',
        'GaussSceneDisplayName': 'Gauss场景名称',
        'clickOrDropGaussScene': '点击或拖拽Gauss场景到此处',
        'GaussSceneDisplayNamePlaceholder': '请输入Gauss场景名称',
        'translateBot': '翻译机器人',
        'targetLang': '目标语言(可手动填写)',
        'targetLangPlaceholder': '请输入或选择目标语言',
        'sourceTextPlaceholder': '请输入文本',
        'translatedTextPlaceholder': '翻译结果',
        "stop": "停止",
        "clear": "清空",
        'quickGenPlaceholder': '输入一句角色设想，例如：傲娇猫娘，生活在魔法森林',
        'oneClickGenCard': '一键生成',
        'oneClickExpand': '一键扩写',
        'noSystemPromptToExtend': '没有可扩写的系统提示词',
        'stopGenCard': '停止生成',
        'genSuccess': '生成成功',
        'genFailed': '生成失败',
        'startGen': '已开始生成，在后台运行不影响其他功能',
        'quickGenSystemPromptPlaceholder': '输入简单的系统提示，例如：mermaid画图助手',
        'AIgening': 'AI正在生成中',
        'CLItool': '命令行工具',
        'CLIEngine': '命令行引擎',
        'workspace': '工作区',
        'webNotSupported': '浏览器模式不支持，仅支持桌面端',
        "CLIButton": "命令行工具",
        'scriptExecuting': '脚本执行中,请请在终端中查看并添加配置，配置完成后，你需要重启agent party以获取最新的配置',
        'installClaudeCode': '安装Claude Code',
        'ClaudeCodeNotice1': '如果你还没有安装Claude Code，请点击下方按钮进行安装，或者在终端中直接输入`bash -c "$(curl -fsSL http://127.0.0.1:3456/sh/claude_code_install.sh)"`即可安装并配置自定义的API地址和API Key；注意！你需要在Claude Code配置支持Anthropic类型的接口，否则会报错。如果你重新配置了Claude code，需要重启agent party以获取最新的配置。不一定所有供应商的API都可以使用，请根据供应商的API文档进行配置。',
        'browserEmbedCodeInstructions': '浏览器嵌入代码，你可以将以下代码嵌入到你的网页中，以实现嵌入功能',
        'extraParamsNotice': '由于不同的服务商可能会给出不同的参数，这可能超出了标准的OpenAI格式，因此，你可能需要手动添加这些参数以适应你的服务商。如果你不确定如何调整这些参数，你可以查询服务商的接口文档。比较常见的参数例如：`enable_thinking`在一些服务商接口中可以控制部分模型是否开启推理。',
        'enabledCCconfig': '启用自定义配置（关闭时使用系统环境变量，开启时使用下方你提供的模型）',
        'anthropicSupportedProviders': '配置支持Anthropic的供应商："Anthropic", "深度求索", "硅基流动", "智谱AI", "月之暗面", "阿里云百炼", "魔搭"',
        'selectSystemPrompt': '选择系统提示词',
        'selectSystemPromptPlaceholder': '请选择系统提示词',
        'QwenCodeNotice1':'推荐使用`npm install -g @qwen-code/qwen-code`安装',
        'installQwenCode': '安装Qwen Code',
        'enableDesktopVisionWakeWord': '启用桌面视觉唤醒词(在你开启桌面视觉生效)',
        'desktopVisionWakeWord': '桌面视觉唤醒词(可以为多个，一行一个)',
        'desktopVisionWakeWordPlaceholder': '请输入桌面视觉唤醒词(可以为多个，一行一个)',
        'FullTextReading': '全文阅读',
        'SegmentedReading': '分段阅读',
        'segmentRead': '分段阅读',
        'playNextSegment': '播放下一段',
        'continuousPlay': '连续播放',
        'stopSegmentTTS': '停止分段阅读',
        'noSegmentList': '没有分段列表',
        'NotExtension': '无扩展插件，扩展面板默认为渲染最后一条消息',
        "defaultView": "默认视图",
        "defaultViewDescription": "显示扩展选择界面",
        "availableExtensions": "可用扩展",
        "noExtensionsFound": "未找到扩展",
        "noContent": "没有内容可显示",
        "switchExtension": "切换扩展",
        "extension": "扩展",
        'clickToSwitchExtensions': '点击切换扩展',
        'clickToSwitchModel': '点击切换模型',
        'useExtension': '使用扩展',
        'addNewExtension': '添加新扩展',
        'waitExtensionInstall': '请等待扩展安装，安装成功会自动刷新扩展页面',
        'deleteSuccess': '删除成功',
        'deleteFailed': '删除失败',
        'extensionExists': '扩展已存在',
        'or': '或',
        'localZipInstallation': '本地zip安装',
        'goToExtensionPage': '前往扩展页面添加',
        'getPlugin': '查看插件列表',
        'refreshList': '刷新列表',
        'getPluginInstructions': '请输入插件的github仓库URL或者直接上传本地的zip包',
        'install': '安装',
        'uninstall': '卸载',
        'refreshedSuccess': '刷新成功',
        'refreshedFailed': '刷新失败',
        'viewRepository': '查看仓库',
        'addYourExtensions': '添加你的扩展',
        'openInWindow': '在新窗口中打开',
        'openInBrowser': '在浏览器中打开',
        'sherpa-onnx-sense-voice': 'Sherpa ONNX(需本地下载)',
        'edgetts': 'Edge TTS(免费，需联网)',
        'modelExist': '模型已存在，可直接使用',
        'downloadFromModelScope': '从魔搭社区下载',
        'downloadFromHuggingFace': '从HuggingFace下载',
        'deleteModel': '删除模型',
        'Re-download': '重新下载',
        'installed': '已安装',
        'notInstalled': '未安装',
        'thinking': '思考中...',
        'update': '更新',
        'downloadNodeJS': '下载NodeJS',
        'webBilibiliInfo': 'web端模式仅供学习交流使用，请勿用于实际生产环境，如需长期稳定使用，请将直播类型选择为B站直播开放平台模式',
        'QQbotNotice':'使用前，请查看AIGC接入QQ机器人须知',
        'loadExtension(node)': '加载扩展(node)',
        'loadExtension(static)': '加载扩展(static)',
        'waitForLoadingExt': '等待扩展加载完成',
        'agplNotice': '完整AGPL协议内容请访问：',
        'thirdPartyNotice': '第三方许可证清单：',
        'vectorInteractTitle': '编辑记忆库',
        'memoryText': '记忆文本',
        'createTime': '创建时间',
        'lastTime': '修改时间',
        'vectorInteract': '编辑记忆',
        'downloadMemory': '下载角色卡',
        'editMemory': '编辑角色卡',
        'removeMemory': '删除角色卡',
        'sqlControl': '数据库工具',
        'downloadUV': '下载uv',
        'sqlEngine': 'SQL引擎',
        'DB_URL': '数据库URL',
        'sqlNotice1': '1. 使用前，你的电脑上需要用uv，可以在`https://docs.astral.sh/uv/getting-started/installation/`下载安装',
        'sqlNotice2': '2. 填写好对应的数据库信息后，启动即可。sqlite数据库需要填入`.db`文件的绝对路径。',
        'success_start_sqlControl': '启动成功',
        'error_start_sqlControl': '启动失败',
        'success_stop_sqlControl': '停止成功',
        'error_stop_sqlControl': '停止失败',
        'user': '用户',
        'host': '主机',
        'port': '端口',
        'password': '密码',
        'dbname': '数据库',
        'dbpath': '数据库路径',
        'sqlButton': '数据库',
        'nodeInstalled': 'NodeJS已安装',
        'nodeNotInstalled': 'NodeJS未安装',
        'installNode': '安装NodeJS',
        'uvInstalled': 'uv已安装',
        'uvNotInstalled': 'uv未安装',
        'installUv': '安装uv',
        'gitInstalled': 'Git已安装，可用于安装扩展',
        'gitNotInstalled': 'Git未安装',
        'installGit': '安装Git',
        'resume_read': '继续阅读',
        'pause_read': '暂停阅读',
        'development': '开发工具',
        'getYouTubeAPIKey': '获取 YouTube API Key',
        'youtubeVideoId': 'YouTube 视频 ID',
        'youtubeVideoIdPlaceholder': '填入youtube视频URL中的视频ID，一般为`v=`后面的内容',
        'youtubeApiKey': 'YouTube API Key',
        'youtubeApiKeyPlaceholder': '填写你的 Google API Key',
        'youtubeHelp': '1. 复制网址中的视频 ID → 2. 填写上方两项即可开始监听弹幕',
        'liveConfig': '直播配置',
        'bilibiliLive': 'B站直播',
        'youtubeLive': 'YouTube 直播',
        'twitchLive': 'Twitch 直播',
        'getTwitchAccessToken': '获取机器人账号的Twitch Access Token',
        'twitchChannel': 'Twitch 频道',
        'twitchChannelPlaceholder': '填写直播账号 Twitch 频道名',
        'twitchAccessToken': 'Twitch Access Token',
        'twitchAccessTokenPlaceholder': '填写你的 Twitch Access Token',
        'twitchHelp': '1. 点击上方Access Token的获取网站，登录你的机器人账号 → 2. 复制生成的 Token → 3. 填写机器人账号的Twitch Access Token和直播账号的Twitch 频道名 → 4. 开始监听弹幕',
        'feishuBot': '飞书机器人',
        'feishu_bot_config': '飞书机器人配置',
        'gotoFeishuBot': '前往飞书开放平台创建机器人',
        'feishuBotNotice': '三分钟快速创建机器人文档',
        'enableTTS': '启用TTS',
        'discordBot': 'Discord机器人',
        "discord_bot_config": "Discord 机器人",
        "gotoDiscordBot": "前往 Discord Developer",
        "discordBotNotice": "官方文档",
        "enterDiscordBotToken": "请输入 Discord Bot Token",
        'danmakuWakeWord': '弹幕唤醒词',
        'wakeWordPlaceholder': '不填写则默认为空，即不使用唤醒词，填入则只有包含该词的弹幕才会触发',
        'openaiStyleAPIKey': 'APi密钥（本接口并没有密钥验证，和ollama类似，密钥随便填）',
        "telegram_bot_config": "Telegram 机器人配置",
        "gotoTelegramBot": "前往 BotFather",
        "telegramBotNotice": "API 文档",
        "enterTelegramBotToken": "请输入从 BotFather 获取的 Token",
        'telegramBot': 'Telegram机器人',
        "systemProxy": "跟随系统代理",
        "manualProxy": "手动代理",
        "noneProxy": "不使用代理",
        'logoPage': 'Logo页',
        'openaiStream': '启用流式模式',
        'customStream': '启用流式模式(部分模型不支持)',
        'serverLogs': '服务器日志',
        'refresh': '刷新',
        'EmbeddingFailed': 'Embedding模型类型不匹配或者无法正常调用',
        'minilmNotice':'启用知识库功能前，你必须配置词嵌入模型，下方提供了一种轻量级的本地模型，你可以点击下载到本地，或者选择你在模型服务中添加的其他词嵌入模型',
        'minilmNotice2':'第一个选项是一个本地词嵌入模型，需要你手动点击下载，请在这个界面下载',
        'minilmNotice3':'启用角色长期记忆功能前，你必须配置词嵌入模型，下方提供了一种轻量级的本地模型，你可以点击下载到本地，或者选择你在模型服务中添加的其他词嵌入模型',
        'minilmNotice4':'第二个选项是一个本地词嵌入模型，需要你手动点击下载，请在这个界面下载',
        'goToMemory': '前往角色卡配置界面',
        'systemtts': '系统TTS(效果一般，速度极快，可用于测试)',
        'Timbre': '音色',
        'sysTTS': '系统TTS',
        'noSystemVoiceDetected': '未检测到系统语音',
    },
    'en-US': {
        'chat': 'Chat',
        'mainModel': 'Main Model',
        'tools': 'Tools',
        'reasoner': 'Reasoner',
        'webSearch': 'Web Search',
        'knowledgeBase': 'Knowledge Base',
        'keyBox': 'Model Service',
        'api': 'API',
        'systemSettings': 'System Settings',
        'uploadFile': 'Upload File',
        'uploadImage': 'Upload Image',
        'clickOrDrop': 'Click or drop files to this area',
        'knowledgeBaseSelection': 'Knowledge Base Selection',
        'noDescription': 'No description yet',
        'noKnowledgeBase': 'No knowledge base yet, please',
        'goToKnowledgeBase': 'Go to knowledge base page',
        'add': 'Add',
        'clearChat': 'Clear Chat',
        'deepThinking': 'Deep Thinking',
        'deepResearch': 'Deep Research',
        'stopGenerating': 'Stop Generating',
        'sendMessage': 'Send Message',
        'inputMessage': 'Please enter the message, press the enter key to send, and press the shift + enter key to wrap the line at the same time...',
        'addNewProvider': 'Add New Provider',
        'apiAddress': 'API Address',
        'apiAddressPlaceholder': 'API Address',
        'apiKey': 'API Key',
        'apiKeyPlaceholder': 'API Key',
        'modelId': 'Model ID (Manual input allowed)',
        'modelIdPlaceholder': 'Click magnifying glass to get list',
        'selectModel': 'Select Model',
        'modelList': 'Model List',
        'provider': 'Provider',
        'pleaseSelect': 'Please Select',
        'customURL': 'Custom URL',
        'defaultConfigInfo': 'Default Configuration Information',
        'autoFilledDefaultAPI': 'Automatically filled the default API address for',
        'enterApiKeyThenClick': 'Enter the API key and then click the magnifying glass in the upper right corner to get the model ID list',
        'localConfigNotice': 'When you choose Ollama, Vllm, LM studio, Xinference, and Dify, you need to pay attention to modifying the URL to your actual URL. Dify currently only supports chat assistant, agent, and Chatflow access modes!',
        'cancel': 'Cancel',
        'confirm': 'Confirm',
        'confirmAdd': 'Confirm Add',
        'currentTime': 'Current Time',
        'fakeInference': 'Fake Inference',
        'latexRenderingEnhancement': 'LaTeX Formula Rendering Enhancement',
        'languageTone': 'Language Tone',
        'assistantLanguage': 'Assistant Language',
        'assistantTone': 'Assistant Tone',
        'searchEngine': 'Search Engine',
        'webCrawling': 'Web Crawling',
        'webSearchTiming': 'Web Search Timing',
        'beforeThinking': 'Triggered before replying',
        'afterThinking': 'Triggered after intent recognition',
        'both': 'Both',
        'resultCount': 'Result Count',
        'baseURL': 'Base URL',
        'searxngURLPlaceholder': 'SearXNG URL (docker deployment exposed port)',
        'tavilyAPIKeyPlaceholder': 'Tavily API Key',
        'jinaAPIKeyPlaceholder': 'Jina API Key (optional)',
        'crawl4AiURLPlaceholder': 'Crawl4Ai url (docker deployment exposed port)',
        'crawl4AiAPIKeyPlaceholder': 'Crawl4Ai API Key (optional)',
        'comfyuiAPIKey': 'ComfyUI API Key (optional)',
        'comfyuiConfigInfo': 'comfyui configuration instructions',
        'comfyuiConfigInfo1': 'The comfyui API key is optional, and you do not need to fill it out if you don not need to call the comfyui built-in API nodes.',
        'comfyuiConfigInfo2': 'You can enter multiple server addresses, and the agent party will automatically load balance these servers.',
        'comfyuiConfigInfo3': 'Since the images generated by comfyui are all local, when they are accessed by robots on the public network (such as QQ robots), it is necessary to enable the image hosting configuration of the robot page.',
        'comfyuiConfigInfo4': 'If you are using the ComfyUI integration package, the server address is defaulted to port 8188. If it is the desktop version, it is defaulted to port 8000.',
        'comfyuiConfigInfo5': 'When you want to upload a comfyui workflow as a tool, you must turn on comfyui is developer mode, and then the uploaded workflow must be a workflow downloaded in API format',
        'useSuperAPI': 'Use OpenAI API',
        'modelName': 'Model Name',
        'apiEndpoint': 'API Endpoint',
        'exampleLanguage': 'Example Language',
        'copy': 'Copy',
        'basicConfiguration': 'Basic Configuration',
        'selectProvider': 'Select Provider',
        'firstTimeUse': 'For the first time, you can',
        'keyBoxInterface': 'Model Service Interface',
        'addProviderReturnSelect': 'Add a new provider. After adding the provider, return to this page and select the provider to continue.',
        'advancedConfiguration': 'Advanced Configuration',
        'temperature': 'Temperature',
        'outputLength': 'Output Length',
        'stopWords': 'Stop Words',
        'stopWordsPlaceholder': 'Enter and select stop words. Do not use spaces as stop words.',
        'conversationRounds': 'Conversation Rounds (when it is 0, memory compression will not occur)',
        'files': ' files',
        'model': 'Model',
        'default': 'Default',
        'segment': 'Segment',
        'segmentSize': 'Segment Size',
        'overlapSize': 'Overlap Size',
        'returnParagraphs': 'Return Paragraphs',
        'knowledgeBaseGenerating': 'Knowledge Base Generating...',
        'addNewKnowledgeBase': 'Add New Knowledge Base',
        'knowledgeBaseName': 'Knowledge Base Name',
        'enterKnowledgeBaseName': 'Enter knowledge base name',
        'knowledgeBaseIntro': 'Knowledge Base Intro',
        'enterKnowledgeBaseIntro': 'Enter knowledge base introduction',
        'advancedSettings': 'Advanced Settings',
        'advancedSettingsClickExpand': 'Advanced Settings(click to expand)',
        'systemLanguage': 'System Language',
        'themeSettings': 'Theme Settings',
        'launching': 'Launching...',
        'launchBrowserMode': 'Launch Browser Mode',
        'reasonerConfig': 'Reasoner Config',
        'createImmediately': 'Create Immediately',
        'vendor.custom': 'Custom OpenAI',
        'vendor.OpenAI': 'OpenAI',
        'vendor.Ollama': 'Ollama',
        'vendor.Vllm': 'Vllm',
        'vendor.xinference': 'Xinference',
        'vendor.Dify': 'Dify',
        'vendor.Deepseek': 'Deepseek',
        'vendor.Volcano': 'Volcano Engine',
        'vendor.siliconflow': 'Silicon Flow',
        'vendor.302.AI':'302.AI',
        'vendor.aliyun': 'Alibaba Cloud Bailian',
        'vendor.ZhipuAI': 'Zhipu AI',
        'vendor.moonshot': 'Moonshot AI',
        'vendor.minimax': 'Minimax',
        'vendor.LMstudio': 'LM Studio',
        'vendor.Gemini': 'Google AI studio',
        'vendor.Anthropic': 'Anthropic',
        'vendor.Grok': 'Grok (xAI)',
        'vendor.mistral': 'Mistral AI',
        'vendor.lingyi': '01.AI',
        'vendor.baichuan': 'Baichuan AI',
        'vendor.qianfan': 'Baidu Qianfan',
        'vendor.hunyuan': 'Tencent Hunyuan',
        'vendor.stepfun': 'StepFun',
        'vendor.o3': 'O3',
        'vendor.aihubmix': 'AIHubMix',
        'vendor.ocoolai': 'OcoolAI',
        'vendor.Github': 'GitHub',
        'vendor.dmxapi': 'DMX API',
        'vendor.openrouter': 'OpenRouter',
        'vendor.together': 'Together AI',
        'vendor.fireworks': 'Fireworks AI',
        'vendor.360': '360 ZhiNao',
        'vendor.Nvidia': 'NVIDIA',
        'vendor.hyperbolic': 'Hyperbolic',
        'vendor.jina': 'Jina AI',
        'vendor.gitee': 'Gitee',
        'vendor.ppinfra': 'PPIO Infra',
        'vendor.perplexity': 'Perplexity AI',
        'vendor.infini': 'Infini-AI',
        'vendor.modelscope': 'ModelScope',
        'vendor.tencent': 'Tencent Cloud',
        'theme.light': 'Light',
        'theme.dark': 'Dark',
        'theme.midnight': 'Midnight',
        'theme.desert': 'Desert',
        'theme.neon': 'Neon',
        'theme.marshmallow': 'Marshmallow',
        'theme.party': 'Party',
        'theme.ink': 'ink-water',
        'copy_success': 'Copied to clipboard',
        'copy_fail': 'Copy failed',
        'settings_save_failed': 'Settings save failed',
        'invalid_file': 'Invalid file',
        'file_upload_failed': 'File upload failed',
        'message.stopGenerate': 'Stop Generating',
        'fetch_models_failed': 'The vendor does not support model list acquisition or API keys not filled in',
        'vendor_required': 'Please select a vendor',
        'kb_created_successfully': 'Knowledge base created successfully',
        'kb_creation_failed': 'Knowledge base creation failed',
        'kb_deleted_successfully': 'Knowledge base deleted successfully',
        'kb_deletion_failed': 'Knowledge base deletion failed',
        'kb_status_change_failed': 'Knowledge base status change failed',
        'kb': 'Knowledge Base',
        'enabled': 'Enabled',
        'disabled': 'Disabled',
        'browserUse': 'Browser Control',
        'browserConfiguration': 'Browser Control Configuration',
        'chromePath': 'Chrome Path',
        'chromePathPlaceholder': 'Please enter the path to the Chrome executable(optional)',
        'resetToDefault': 'Reset to default',
        'browserConfigNotice': 'After adding a vendor, please return to this page and select [Models with Visual Capabilities] to continue.Visual model names typically include keywords such as "vision", "image", and "img". For example: qwen-vl, etc',
        'mainmodelnotice': 'After adding a vendor, please return to this page and select [Models with tool capabilities and stable JSON format output capabilities] to continue.',
        'addresonerNotice': 'After adding a vendor, please return to this page and select [Models with reasoning capabilities] to continue.The name of the reasoning model generally includes keywords such as "reasoner" and "R1".For example: deepseek-r1, etc',
        'addemdNotice': 'After adding a vendor, please return to this page and select [word embedding model] to continue. Word embedding model names generally contain keywords such as "embedding", "ebd", "bge", etc.For example: bge-m3, etc',
        'autoUpdateSettingNote':'After turning it on, the agent will reply slower. If you do not select any word embedding model, [long-term memory] will not be enabled.',
        "NoLongTermMemory": "Disabled long-term memory",
        'usePlaywright': 'Whether to use Playwright',
        'mcpServers': 'MCP Servers',
        'addNewMCP': 'Add MCP Server',
        'mcpJsonFormat': 'JSON Format Example:',
        'mcpJsonInput': 'MCP Configuration (JSON)',
        'mcpJsonPlaceholder': 'Enter the full MCP server configuration. If the path contains`\\`characters, change it to`/`or`\\\\`...',
        'autoApprove': 'Auto Approve',
        'confirmDeleteTitle': 'Confirm Delete',
        'confirmDeleteMCP': 'Are you sure to delete this MCP server?',
        'mcpAddedSuccess': 'MCP server added successfully',
        'mcpAddFailed': 'Failed to add MCP server',
        'invalidJsonFormat': 'Invalid JSON format',
        'invalidMCPConfig': 'Missing required fields (command/args)',
        'serverType': 'Server Type',
        'stdioServer': 'Standard I/O',
        'sseServer': 'Server-Sent Events (SSE)',
        'wsServer': 'WebSocket',
        'mcpAdded': 'MCP server added successfully',
        'invalidCommand': 'Invalid command path',
        'duplicateServer': 'Server name already exists',
        'mcpServersManagement': 'MCP Servers Management',
        'manageMCPServers': 'Manage MCP Servers', 
        'nomcpServers': 'No MCP servers found',
        'goTomcpServers': 'Go to MCP Servers page',
        'callingMethod': 'Calling Method',
        'modelService': 'Model Service',
        'addNewAgent': 'Add New Agent',
        'agentName': 'Agent Name',
        'systemPrompt': 'System Prompt',
        'editAgent': 'Edit Agent',
        'addAgent': 'Add Agent',
        'agentName': 'Agent Name',
        'agentNamePlaceholder': 'Please enter the agent name',
        'systemPrompt': 'System Prompt',
        'systemPromptPlaceholder': 'Please enter a system prompt, for example: Always communicate with the user in their language',
        'createAgent': 'Create Agent',
        'copyAgentId': 'Copy Agent ID',
        'editAgent': 'Edit Agent',
        'deleteAgent': 'Delete Agent',
        'agentInfo': 'After clicking Create Agent, a callable agent will be generated according to your current configuration, including system prompts, model services, knowledge bases, tools, inference models, MCP servers, A2A servers, etc. You can click on the tab in the upper left corner to copy the agent ID as the `model` parameter in the OpenAI API request, which will call the corresponding agent.',
        'agents': 'Agents',
        'agentSnapshot': 'Agent Snapshot',
        'mainAgent': 'Main Agent', 
        'toolAgents': 'Tool Agents',
        'agentSettings': 'Agent Settings',
        'noagents': 'No Agents found',
        'goToagents': 'Go to Agents page',
        'defaultAgent': 'Default Agent',
        'aboutUs': 'About Us',
        'a2aServers': 'A2A Servers',
        'addA2AServer': 'Add A2A Server',
        'a2aInitFailed': 'A2A initialization failed',
        'noA2AServersPrompt': 'No A2A servers added yet. Go to add one?',
        'noA2AServers': 'No available A2A servers',
        'a2aServersManagement': 'A2A Servers Management', 
        'goToAdd': 'Add Now',
        'A2AUrl': 'A2A URL',
        'preview': 'Preview',
        'mermaidError': 'Mermaid error',
        'useWebmode': 'Use Web Mode',
        'chatHistory': 'Chat History',
        'newChat': 'New Chat',
        'messages': 'Messages',
        'untitled': 'Untitled',
        'cannotDeleteActive': 'Cannot delete active chat',
        'noChatHistory': 'No chat history',
        'emptyConversationRemoved': 'Empty conversation removed',
        'newVersion': 'New Version Available',
        'updateAvailable': 'New version found',
        'downloading': 'New version downloading',
        'installNow': 'Install Now',
        'updateReady': 'Update ready',
        'updateDownloaded': 'Download complete, click to install',
        'updateSuccess': 'Update successfully',
        'llmTool': 'LLM Tools',
        'addLLMTool': 'Add LLM Tool',
        'toolName': 'Tool Name',
        'interfaceType': 'Interface Type', 
        'description': 'Description',
        'modelName': 'Model Name',
        'toolNamePlaceholder': 'e.g. comfyui LLM party agent',
        'selectInterfaceType': 'Select Interface Type',
        'descriptionPlaceholder': 'For agent tool selection reference...',
        'modelPlaceholder': 'Input or select model',
        'llmTools': 'LLM Tools',
        'llmToolsManagement': 'LLM Tools Management',
        'noLLMTools': 'No LLM Tools Configured',
        'gollmTools': 'Go to LLM Tools page',
        'superAPIInstructions': 'The default model name is super-model, which calls the agent in the current configuration. To call another agent, enter the corresponding agent name or ID in Model Name, which can be viewed and copied on the Agent Snapshot page.',
        'extraParams': "Extra Parameters",
        'paramName': "Param Name",
        'paramType': "Param Type",
        'paramValue': "Param Value",
        'string': "String",
        'integer': "Integer", 
        'float': "Float",
        'boolean': "Boolean",
        'save': 'Save',
        'editSystemPrompt': 'Edit System Prompt',
        'editMessage': 'Edit Message', 
        'enterContent': 'Please enter content',
        'edit': 'Edit',
        'defaultSystemPrompt': ' ',
        'system_prompt': 'System Prompt:',
        'expand': 'Expand',
        'collapse': 'Collapse',
        'delete': 'Delete',
        'reset': 'Reset',
        'selectOrTypeModel': 'Select or Type Model Name',
        'expandAside': 'Expand Aside',
        'collapseAside': 'Collapse Aside',
        'pollinationsImageGeneration': 'Pollinations Image Generation',
        'languagePlaceholder': 'Please input language',
        'tonePlaceholder': 'Please input tone',
        'agentSuite': 'Agent',
        'modelConfig': 'Model',
        'modelService': 'Model Service', 
        'mainModel': 'Main Model',
        'reasonerModel': 'Reasoner Model',
        'toolkit': 'Toolkit',
        'apiGroup': 'Developer',
        'openaiStyleAPI': 'OpenAI-style API', 
        'browserMode': 'Browser Mode',
        'file_type_error': 'File type is not supported',
        'image_type_error': 'Image type is not supported',
        'uploadImage': 'Upload Image',
        'visionModel': 'Vision Model',
        'visionSettings': 'Vision Settings',
        'addVisionNotice': 'After adding a vendor, return to this page and select 【Vision Model】 to continue. The vision model name generally contains keywords such as "vision", "v", "o", etc. For example: qwen-vl, etc.',
        'vision': 'Vision',
        'KBSearchTiming': 'Knowledge Base Search Timing',
        'error_unknown': 'Unknown error',
        'kbSettings': 'Knowledge Base Settings',
        'addKnowledgeBase': 'Add Knowledge Base',
        'embeddingProvider': 'Embedding Provider',
        'reasoningProvider': 'Reasoning Provider',
        'visionProvider': 'Vision Provider',
        'gotoAPI': 'Obtain API key or relevant documentation',
        'gotoGithub': 'Go to Github documentation',
        'streamableHttpServer': 'Streamable HTTP',
        'rerankEnable': 'Enable Rerank Model',
        'returnChunks': 'Number of results returned',
        'rerankProvider': 'Rerank Model Provider',
        'rerankmodelnotice': 'After adding a vendor, return to this page and select 【Re-rank Model】 to continue. The re-rank model name generally contains keywords such as "rerank", "rank", etc. Currently, the supplier only supports jina and Vllm',
        'storage': 'Storage',
        'storageText': 'Text File',
        'storageImage': 'Image File',
        'storageVideo': 'Video File',
        'keywordSemanticWeight': 'Keyword↔️Semantic Search Weight',
        'keyword': 'Keyword',
        'semantic': 'Semantic',
        'MCPStyleAPI': 'MCP Style API',
        'MCPAPIInstructions':'It can be used by selecting the SSE protocol in other clients that support MCP and filling in the corresponding URL.',
        'docker': 'Docker',
        'dockerInstructions':'Please ensure that you have installed Docker and the Docker service is running. You can copy the following code and run it in the terminal to start a local service of super agent party. ⭐Note! `./super-agent-data` can be replaced with any local folder, after Docker starts, all data will be cached to that local folder and will not be uploaded anywhere.',
        'fileDeleted': 'File deleted',
        'fileDeleteFailed': 'File delete failed',
        'noFiles': 'There are no text files in the storage space',
        'noImages': 'There are no image files in the storage space',
        'noVideos': 'There are no video files in the storage space',
        'preview': 'Preview',
        'fileGet': 'Obtaining the content of file/image URLs',
        'memory': 'SillyTavern Cards',
        'addNewMemory': 'Add New Character Cards',
        'memoryConfig': 'Character Cards Configuration',
        'vectorProvider': 'Vector Provider',
        'selectMemoryPlaceholder': 'Please select a Character Cards',
        'selectMemory': 'Current Character Cards',
        'availableVoice':'Available Character Voice',
        'memoryEnable': 'Enable Character Cards',
        'memoryInterface':'Character Cards Interface',
        'memoryName': 'Character Cards Name',
        'worldviewSetting': 'Worldview Setting',
        'settingName': 'Setting Name',
        'settingValue': 'Setting Value',
        'basicCharacter': 'Basic Character',
        'inputBasicCharacter': 'Please enter basic character',
        'interpreter': 'Code tool',
        'codeEngine': 'Code Engine',
        'e2bAPIKeyPlaceholder': 'Please input e2b API key',
        'networkSettings': 'Network Settings',
        'local': 'Only visible locally',
        'global': 'Visible to all devices',
        'localVendor': 'Local Model',
        'restartConfirmText': 'Do you want to confirm the restart to update the current configuration?',
        'restartConfirm': 'Restart Confirmation',
        'confirmRestart': 'Confirm Restart',
        'memoryDeleted': 'Memory deleted',
        'memoryDeleteFailed': 'Memory delete failed',
        'AgentDeleted': 'Agent deleted',
        'AgentDeleteFailed': 'Agent delete failed',
        'sandboxURLPlaceholder': 'Please enter Sandbox Fusion URL',
        'addCustomHttpTool': 'Add Custom HTTP Tool',
        'editCustomHttpTool': 'Edit Custom HTTP Tool',
        'HTTPNamePlaceholder': 'Enter tool name. Can only contain English characters',
        'url': 'URL',
        'urlPlaceholder': 'Enter the tool URL',
        'headers': 'Headers',
        'headersPlaceholder': `{
    'Content-Type': 'application/json',
    'Authorization': 'Bearer YOUR_API_KEY_HERE'
}`,
      'body': 'Body',
      'bodyPlaceholder': `Enter the body in JSON Schema format. For example:
{
    "type": "object",
    "properties": {
        "name": {
            "type": "string",
            "description": "The name of the person",
            "default": "John Doe",
            "enum": ["John Doe", "Jane Doe"]
        },
        "age": {
            "type": "integer",
            "description": "The age of the person",
            "minimum": 0,
            "maximum": 120,
            "default": 30,
        }
    },
    "required": ["name", "age"]
}`,
        'bodyJsonSchemaFormat': 'The body should be in JSON Schema format',
        'customHttpTool': 'HTTP Tool',
        'HttpToolsManagement': 'HTTP Tools Management',
        'noHttpTools': 'There are no HTTP tools',
        'goHttpTools': 'Go to HTTP Tools Management',
        'qqBot': 'QQ Bot',
        "qq_bot_config": "QQ Bot",
        "enter_qq_bot_app_id": "Enter QQ Bot App ID",
        "enter_qq_bot_secret": "Enter QQ Bot App Secret",
        "start_bot": "Start Bot",
        "stop_bot": "Stop Bot",
        "reload_bot": "Reload Bot",
        'separators': 'Message Separators',
        'deployBot': 'Bot',
        'qqBotConfig': 'QQ Bot Configuration',
        'selectExistingMemory': 'Starting from existing character cards',
        'selectExistingMemoryPlaceholder': 'Please select an existing character card',
        'none': 'None',
        'reasoningVisibleEnable': 'Reasoning process visible',
        'text2imgModel': 'Text2Image Model',
        'imgModel':'ImgGen Model',
        'width': 'Width',
        'height': 'Height',
        'openaiLike': 'OpenAI-like interface',
        'pollinations': 'pollinations (free, requires internet connection, text-to-image only)',
        'openaiImageLike': 'OpenAI-like Image Generation Interface (text-to-image only)',
        'openaiChatLike': 'OpenAI-like Chat Completion Interface (some models support this compatible interface, e.g., nano banana)',
        'imgModelProvider': 'Image Generation Model Provider',
        'addimgNotice': 'After adding a provider, please return to this page and select 【Image Generation Model】 to continue. Image Generation model names generally include keywords such as "img" or "image". Currently supports all providers compatible with the OpenAI chat completion interface.',
        'addText2imgNotice': 'After adding a provider, please return to this page and select 【Text-to-Image Model】 to continue. Text-to-Image model names generally include keywords such as "img" or "image". Currently supports all providers compatible with the OpenAI image generation interface.',
        'addAsrNotice': 'After adding a provider, please return to this page and select the [Speech Recognition Model] to proceed. Speech recognition model names generally include keywords such as "transcribe" or "speech." Currently supports all OpenAI-compatible providers.',
        'text2imgModelProvider': 'Text2Image Model Provider',
        'asrModelProvider': 'Speech Recognition Model Provider',
        'size': 'Size',
        'sizeAllowCreate': 'Size (Manual input allowed)',
        'auto': 'Auto',
        'text2img': 'Text2Image',
        'imgGen': 'Image Generation',
        'randomSetting': 'Random Setting',
        'worldviewSettingNote': 'Worldview setting: similar to a lorebook, when a user mentions or the intelligent entity mentions a setting name in the last conversation, the intelligent entity will see the setting value in the conversation.',
        'basicCharacterNote': 'Character setting: All information will be directly added to the system prompt, and the intelligent experience will conduct a dialogue according to the character setting.',
        'randomSettingNote': 'Random setting: After opening a new conversation, a random setting will be added to the system prompt, and the AI will conduct a conversation according to the random setting.',
        'text2imgEngine': 'Text-to-image interface',
        'imgEngine': 'Image generation interface',
        'autoUpdateSetting': 'Long-term memory',
        'autoUpdateNote': 'Long-term memory: It will dynamically update a memory database according to the content of the conversation. During the conversation, it will return relevant memories to the intelligent entity based on the user questions.',
        'quickRestartEnable': 'Enable/Restart shortcut command',
        'bot_config': 'Bot General Config',
        'imgHost': 'Image Host / File Host',
        'imgHostType': 'Image Host / File Host Type',
        'mcpDeleted': 'MCP server deleted',
        'mcpDeleteFailed': 'MCP server delete failed',
        'gotoEasyImage2Github': 'Get the EasyImage2 GitHub documentation',
        'collapseInput': 'Collapse Input',
        'expandInput': 'Expand Input',
        'googleCSEIdPlaceholder': 'Please enter Google Custom Search Engine ID',
        'searchEndpoint': 'Search Endpoint',
        'gotoQQbot': 'Go to the QQ Open Platform.' ,
        'comfyuiServers': 'ComfyUI Servers',
        'addServer': 'Add Server',
        'server': 'Server',
        'noServerTip': 'No server added',
        'comfyuiConfig': 'ComfyUI Config',
        'comfyuiDisplay': 'ComfyUI Display',
        'connectServer': 'Not connected to the server.',
        'connectComfyUIServer': 'Connect to ComfyUI Server',
        'removeComfyUIServer': 'Remove ComfyUI Server',
        'comfyuiWorkflowTool': 'ComfyUI Workflow Tool',
        'ComfyUIManagement': 'ComfyUI Management',
        'noWorkflows': 'No workflows',
        'goComfyui': 'Go to ComfyUI Management',
        'uploadWorkflowJson': 'Upload Workflow JSON',
        'textInput': 'Text Input',
        'selectTextInput': 'Please select text input',
        'imageInput': 'Image Input',
        'selectImageInput': 'Please select image input',
        'seedInput': 'Seed Input',
        'selectSeedInput': 'Please select seed input',
        'comfyuiWorkflowInfo': 'As much as possible, describe the function of this workflow in the first input box, as well as the role of each input, for example: the first text input is a forward image prompt, the second text input is a reverse image prompt, the third image input is an image that needs to be modified, and the fourth image input is a style image that needs to be referenced. Not every text and image input item needs to be filled in, and unfilled input items will be ignored.',
        'repo_owner': 'Repository Owner',
        'repo_owner_placeholder': 'Please enter the repository owner',
        'repo_name': 'Repository Name',
        'repo_name_placeholder': 'Please enter the repository name',
        'branch': 'Branch',
        'branch_placeholder': 'Please enter the branch name',
        'token': 'Private token',
        'token_placeholder': 'Please enter the private token',
        'stickerPacks': 'Sticker Packs',
        'createStickerPack': 'Create Sticker Pack',
        'packName': 'Sticker/Images Pack Name',
        'uploadStickers': 'Upload Stickers/Images',
        'addTags': 'Add Tags',
        'addStickerPack': 'Add Sticker/Images Pack',
        'sticker': 'Sticker',
        'imageDescription': 'Image Description',
        'enterDescriptionPlaceholder': 'Please enter a description',
        'sticker/image': 'Sticker/Image Pack',
        'utilityTools': 'Utility Tools',
        'images': 'Images',
        'stickerPackCreated': 'Sticker/Image Pack Created',
        'createFailed': 'Create Failed',
        'stickerPackDeleted': 'Sticker/Image Pack Deleted',
        'StickerPackManagement': 'Sticker/Image Pack Management',
        'noStickerPack': 'No Sticker/Image Pack',
        'goStickerPack': 'Go to Sticker/Image Pack Management',
        'clickOrDropJson': 'Click or drag the memory JSON file here',
        'clickOrDropCharacterCards': 'Click or drag the character card JSON/PNG file here',
        'noResults': 'No results',
        "searchChatHistoryPlaceholder": "Search chat history",
        'fastAPIDocs': 'FastAPI Docs',
        'asyncTools': 'Async Tools',
        'asyncToolsNotice': 'When a tool is called, it does not block the conversation, and the tool result will be added to the conversation after generation. Note! This only applies to streaming API calls; non-streaming API calls will not be affected.',
        'currentTimeNotice': 'The agent will automatically display the current time and time zone information, or actively query the current time in a specified time zone.',
        'fileGetNotice': 'The agent will gain the ability to query files or image URLs. When obtaining the image URL, the main model needs to have visual capabilities or the visual model must be correctly configured and enabled.',
        'deepResearchNotice':  'Enable deep research mode, where the agent will dynamically plan and utilize tools to solve complex problems. It is recommended to use this mode in conjunction with internet access or a local knowledge base.',
        'fakeInferenceNotice': 'Enable pseudo-reasoning mode, allowing models without reasoning capabilities to also perform reasoning.',
        'latexRenderingEnhancementNotice': 'Enable the agent to output LaTeX formulas more stably.',
        'languageNotice': 'The agent will use the specified language and tone for the conversation.',
        'asrModel': 'ASR Model',
        'asrEngine': 'ASR Engine',
        'asr': 'ASR',
        'funasrURL': 'FunASR URL',
        'funasrURLPlaceholder': 'Please enter the FunASR URL',
        'funasrNotice1': '1. Please start the FunASR service in docker, you need to enter in the terminal:` docker run -d -p 10095:10095 -p 10096:10096 harryliu888/funasr-online-server:latest `',
        'funasrNotice2': '2. One hot keyword per line, each line should be filled in the format of keyword + space + weight',
        'mode': 'Mode',
        'online': 'Real-time',
        'offline': 'Non-real-time',
        'interactionMethod': 'Interaction Method',
        'manual': 'Manual',
        'wakeWord': 'Wake Word',
        'endWordMode': 'End Word: Available when both wake word and end word modes are enabled. In this mode, the wake state will not auto sleep; messages containing the end word will trigger sleep state.',  
        'endWordPlaceholder': 'Please enter the end word',  
        'endWordDetected': 'Fall asleep',  
        'wakeWordAndEndWord': 'Wake Word and End Word',  
        'wakeWordMode': 'Wake Word: Available when wake word mode is enabled. In this mode, the system enters wake state only upon detecting the wake word and returns to sleep state after 30 seconds.',
        'wakeWordPlaceholder': 'Please enter the wake word',
        'hotkey': 'Hotkey: Press to record, release to send automatically',
        'Space':'Space',
        'hotwords': 'Hot words',
        'hotwordsPlaceholder': 'Please enter the hot words',
        'ttsModel': 'TTS Model',
        'asrReady': 'ASR Ready',
        'ttsEngine': 'TTS Engine',
        'tts': 'TTS',
        'initializing': 'Initializing',
        'backward': 'Backward',
        'forward': 'Forward',
        'edgettsLanguage': 'EdgeTTS Language',
        'edgettsGender': 'EdgeTTS Gender',
        'edgettsVoice': 'EdgeTTS Voice',
        'enabledInterruption': "Enable Interruption(if microphone is close to speaker, model's voice to interrupt itself)",
        'edgettsRate': 'EdgeTTS Rate',
        'maxConcurrency': 'TTS Max Concurrency',
        "zh":"Chinese",
        "en":"English",
        "ja":"Japanese",
        "ko":"Korean",
        "yue" :"Cantonese",
        "auto": "Auto",
        "auto_yue": "Auto (Cantonese)",
        "gsvRate": "Speech rate",
        "gsvSample_steps": "Sample steps",
        'gsvTextLang': 'Target language',
        'gsvPromptLang': 'Reference audio language',
        'gsvPromptAudio': 'Reference sound',
        'uploadGsvRefAudio': 'Upload reference sound',
        'gsvPromptText': 'Reference audio text',
        'gsvPromptTextPlaceholder': 'Please enter the corresponding text in the reference audio',
        'addRefAudio': 'Add reference sound',
        'gsvNotice1': '1. Please click the document link above to download the GSV-V4 integrated package.It is recommended to use the v2 version',
        'gsvNotice2': '2. Please open the terminal under the integration package project path, and execute ` runtime/python.exe api_v2.py -a 127.0.0.1 -p 9880 -c GPT_SoVITS/configs/tts_infer.yaml ` to start the service.',
        'gsvServer': 'GSV Server URL(You can enter multiple items, one per line.)',
        'gsvServerPlaceholder': 'Please enter the GSV Server URL',
        'Male': 'Male',
        'Female': 'Female',
        'tablePet': 'VRM Table Pet Bot',
        'table_pet_config': 'VRM Table Pet Config',
        'gotoVroid': 'Create in Vroid Studio',
        'start_table_pet': 'Start VRM Table Pet',
        'stop_table_pet': 'Stop VRM Table Pet',
        'reload_table_pet': 'Reload VRM Table Pet',
        'start_table_pet_web': 'Start VRM Table Pet (Web)',
        'WebSocketConnected':'WebSocket Connected - Click Disconnect',
        'WebSocketDisconnected':'WebSocket Disconnected - Click Connect',
        'dragWindow': 'Drag to move the window',
        'refreshWindow': 'Refresh the window',
        'closeWindow': 'Close the window',
        'enabledExpressions': 'Enable Expressions',
        "vrmModel": "VRM Model",
        "addVrmModel": "Add VRM Model",
        "uploadVrmModel": "Upload VRM Model",
        "clickOrDropVrm": "Click or drop .vrm file here",
        "modelDisplayName": "Model Display Name",
        "modelDisplayNamePlaceholder": "Please enter the display name for the model",
        "vrmNotice1": "1. Please upload the 3D model file in .vrm format; you can also upload the animation file in .vrma format to make the model have more idle actions!",
        "vrmNotice2": "2. left mouse button rotation, scroll wheel zoom, right button panning, the first button in the upper right corner drags the overall window",
        'vrmNotice3': '3. If you want to record a digital live-streaming video with a transparent background or record a desk pet during a live stream, please add `http://127.0.0.1:3456/vrm.html` as the video source in the browser source of the recording software',
        'Previous': 'Previous',
        'Next': 'Next',
        'gotoVroidHub': 'Vroid Hub',
        'webSpeech': 'Web Speech API (free, but only available in browser mode)',
        'webSpeechStarted': 'Web Speech API started',
        'webSpeechNotSupportedInElectron': 'In Electron, the Web Speech API is not supported, and the browser mode is automatically turned on...',
        'webSpeechNotice1': 'Start automatic speech recognition with Web Speech API engine, will automatically jump to browser mode',
        'windowWidth': 'Window Width',
        'windowHeight': 'Window Height',
        'live_stream': 'Live Stream',
        'live_stream_bot': 'Live Stream Bot',
        'bilibili': 'Bilibili',
        'roomID': 'Room ID',
        'roomIDPlaceholder': 'Please enter the room ID',
        'sessdata': 'SESSDATA',
        'sessdataPlaceholder': 'Please enter the SESSDATA',
        'web':'Web',
        'bilibili_open_live': 'Bilibili Open Live',
        'liveType': 'Live Type',
        'liveTypePlaceholder': 'Please select live type',
        'ACCESS_KEY_ID_Placeholder': 'Please enter the Access Key ID',
        'ACCESS_KEY_SECRET_Placeholder': 'Please enter the Access Key Secret',
        'APP_ID_Placeholder': 'Please enter the APP ID',
        'ROOM_OWNER_AUTH_CODE_Placeholder': 'Please enter the Room Owner Auth Code',
        'start_live': 'Start Live',
        'stop_live': 'Stop Live',
        'reload_live': 'Reload Live Config',
        'liveSetting': 'Live Setting',
        'onlyDanmaku': 'Only Reply Danmaku',
        'danmakuQueueLimit': 'Danmaku Queue Upper Limit',
        'gotoBilibiliOpenLive': 'Go to Bilibili Open Live',
        'wxBot': 'WeChat Bot',
        'wx_bot_config': 'WeChat Bot Config',
        'nickName': 'Nick Name',
        'nickNamePlaceholder': 'Please enter the nick name, and select this nick name, you can enter multiple nick names, or group chat names',
        'windowsOnly': 'Windows Only',
        'groupWakeWord': 'Group Wake Word',
        'triggerMode': 'Trigger Mode',
        'accuweather': 'Accuweather',
        'weatherNotice': 'You can obtain the latitude and longitude of a specified city, as well as real-time or forecasted weather information.Support the return of the weather forecast map',
        'openMeteoWeather': 'Open Meteo Weather',
        'wikipedia': 'Wikipedia',
        'wikipediaNotice': 'Get the summary or specific chapter content of a specified keyword from Wikipedia',
        'toolMemorandum': 'Tool Memorandum',
        'toolMemorandumNotice': 'When enabled, the tool call results will be added to the chat record, and when disabled, they will not be added to save token consumption',
        'arxiv': 'arXiv',
        'arxivNotice': 'Get the latest research paper information from arXiv',
        'gotoAplaybox': 'Aplaybox',
        'briefly': 'Briefly',
        'rewrite': 'Rewrite',
        'proxySettings': 'Proxy Settings',
        'proxyPlaceholder': 'Please enter the proxy address, such as http://127.0.0.1:7890 or http://127.0.0.1:10809',
        'proxyNotice': 'If following the system proxy does not work, you can try using a manual proxy. For manual proxy, you need to enter the HTTP proxy address, for example: http://127.0.0.1:7890 or http://127.0.0.1:10809.',
        'wxNotice': 'Before starting the robot, you must log in to WeChat and ensure that the WeChat window is on the screen. The agent party will simulate user behavior and take over your WeChat account.',
        'language': 'Language',
        'webSpeechAPI': 'Web Speech API',
        'openUserfile': 'Open data folder',
        'openLogfile': 'Open Log Folder',
        'userfileNotice': 'Deleting all files in the data folder will restore the software to its initial state',
        'logfileNotice': 'You can view the latest two log files in the log folder to obtain software operation logs',
        'openExtfile': 'Open extension folder',
        'SubtitleEnabled': 'Subtitle Enabled',
        'SubtitleDisabled': 'Subtitle Disabled',
        'bufferWord': 'Buffer Word',
        'bufferWordPlaceholder': 'Please enter a buffer word to make Text To Speech appear faster, for example: Um, OK',
        'UsingVRMAAnimations': 'Currently using VRMA Animations',
        'UsingProceduralAnimations': 'Currently using Procedural Animations',
        'clickToUse': '1. Click the button on the desktop to start browser mode',
        'scanToUse': '2. Scan the QR code with your mobile browser to quickly use the browser mode. Before scanning, ensure the network is set to "visible to all devices" in the system settings, and that your phone and computer are on the same local network.',
        'low': 'Low',
        'medium': 'Medium',
        'high': 'High',
        'reasoningEffort': 'Reasoning Effort(Only some models are supported, please select Automatic for unsupported models)',
        'TTSModelProvider': 'TTS Model Provider',
        'TTSVoice': 'Voice (you can manually add if not in the list)',
        'TTSRate': 'TTS Rate',
        'addTTSNotice': "After adding a vendor, please return to this page and select 【Text To Speech Model】 to continue. Keywords such as 'tts' are generally included in the name of the text-to-speech model. Currently supported vendors: openai, silicon-based flow, etc. Note! The timbre needs to match the vendor's model, otherwise it cannot be used. Only the timbre of openai is included in the default timbre list. The timbre of other vendors needs to be added manually. The reference tone only supports silicon-based flow for the time being. Please do not choose the reference tone for other suppliers!",
        'noRefAudio': 'No reference audio is used',
        'error_start_HA': 'Error starting Home Assistant, please check the configuration',
        'error_stop_HA': 'Error stopping Home Assistant',
        'homeAssistant': 'Home Assistant',
        'success_start_HA': 'Home Assistant started successfully',
        'success_stop_HA': 'Home Assistant stopped successfully',
        'HANotice1': '1. Install Home Assistant',
        'HANotice2': '2. In Home Assistant, go to Settings > Devices & Services > Add Integration, then search for MCP and add the Model Context Protocol Server',
        'HANotice3': '3. The default baseURL is `http://localhost:8123`; change it to `http://<your_IP>:8123`',
        'HANotice4': '4. Click on your user avatar in the bottom left corner of Home Assistant, then go to Security, generate a Long-Lived Access Token at the bottom of the page, and copy the token into the API key field',
        'browserControl': 'Browser Control',
        'browserNotice1': '1. Click the link above to install the browser extension',
        'browserNotice2': '2. Your computer needs to have Node.js installed. You can download and install it from `https://nodejs.org/en/download`',
        'browserNotice3': '3. Click the connect button on the new extension page, then enable browser control on this page',
        'gotoBrowserExtension': 'Go to Browser Extension',
        'success_start_browserControl': 'Browser Control started successfully',
        'success_stop_browserControl': 'Browser Control stopped successfully',
        'error_start_browserControl': 'Error starting Browser Control, please check the configuration',
        'error_stop_browserControl': 'Error stopping Browser Control',
        'selectVrmModel': 'Select VRM Model',
        'defaultModels': 'Default Models',
        'userModels': 'User Models',
        'selectVrmaMotions': 'Select VRMA Motions',
        'defaultMotions': 'Default Motions',
        'userMotions': 'User Motions',
        'addVrmaMotion': 'Add VRMA Motion',
        'uploadVrmaMotion': 'Upload VRMA Motion',
        'clickOrDropVrma': 'Click or drag and drop VRMA motion to this area',
        'motionDisplayName': 'Motion Display Name',
        'motionDisplayNamePlaceholder': 'Please enter a motion display name',
        'characterDescription': 'Character description',
        'characterDescriptionPlaceholder': 'Please enter character description',
        'Personality': 'Personality',
        'personalityPlaceholder': 'Please enter personality',
        'mesExample': 'Dialogue Example',
        'mesExamplePlaceholder':`Please enter a conversation example, for example:
{{User}}: Hello
{{char}}: Hello`,
        'systemPromptPlaceholder': 'Please enter the system prompt',
        'characterBook': 'Character Book',
        'keysPlaceholder':`key1
Key2
Key3`,
        'contentPlaceholder':'Please enter relevant content that will be added to the context when relevant keywords are mentioned',
        "firstGreeting": "Opening remarks",
        'firstMes': 'Opening remarks',
        'alternateGreeting': 'Extra opening remarks',
        'keys': 'Keys (one per line)',
        'content': 'Content',
        'userName': 'User Name',
        'userNamePlaceholder': 'Please enter a user name, which determines how the character addresses you',
        'genericSystemPrompt': 'Generic System Prompt',
        'genericSystemPromptPlaceholder': 'Please enter a generic system prompt, for example: Always communicate with the user in the language they use',
        'avatar': 'Avatar',
        'avatarPlaceholder': 'Please enter the avatar link or the absolute path to the local file, for example: https://example.com/avatar.png or /Users/username/avatar.png or C:\\ Users\\ username\\ avatar.png',
        'translate': 'Translate',
        'translating': 'Translating',
        'is_sandbox': 'Is sandbox(No IP restrictions, but only you and your test group are visible, the sandbox replies to up to 4 messages at a time)',
        'warning': 'Warning',
        'confirmClearAllHistory': 'Confirm to clear all history',
        'clearSuccess': 'Clear success',
        'confirmKeepLastWeek': 'Confirm to keep last week history only',
        'keepLastWeek': 'Keep last week history only',
        'clearAllHistory': 'Clear all history',
        'getCardLink': 'Where can character cards be obtained?', 
        'memoryResultCount': 'long-term memory result count',
        'customTTS': 'Custom TTS (Most open-source TTS portable packages use this interface, such as cosyVoice/indexTTS, etc.)',
        'customTTSserver': 'Custom TTS server(You can enter multiple items, one per line.)',
        'customTTSServerPlaceholder': 'Please enter the custom TTS server URL',
        'customTTSspeaker': 'Custom TTS speaker',
        'customTTSspeakerPlaceholder': 'Please enter the custom TTS speaker',
        'customTTSspeed': 'Custom TTS speed',
        "customTTSKeyMapping": "API Parameter Key Mapping (Advanced)",
        "key_text": "Text Key Name (Default: text)",
        "key_speaker": "Speaker Key Name (Default: speaker)",
        "key_speed": "Speed Key Name (Default: speed)",
        'gsvGsvAudioPath':'Refer to the audio path (if the file is uploaded, leave it blank)',
        'gsvGsvAudioPathPlaceholder': 'Please enter the audio path',
        'selectAll': 'Select All',
        'batchDelete': 'Batch Delete',
        'batchDeleteSuccess': 'Batch delete success',
        'batchDeleteFailed': 'Batch delete fail',
        'readBot': 'Broadcasting Bot',
        'start_read': 'Start Reading',
        'stop_read': 'Stop Reading',
        'longTextPlaceholder': 'Please enter the long text that needs to be read. Combined with the desktop pet robot, it can achieve digital human voice broadcasting. You can use <voice name></voice name> to enclose the text with the corresponding voice name for multi-voice reading. If you import an EPUB format file, you can click the button in the upper right corner to switch chapters. If you want to quickly convert audio, choose fewer delimiters, such as only selecting line breaks. If you want digital human voice broadcasting or multi-voice reading, choose more delimiters, such as commas and periods.',
        'selectFile': 'Select File',
        "parseFileContent": "Parse File Content",
        "clearText": "Clear Text",
        'ttsNotEnabled': 'TTS is not enabled',
        'ttsAutoEnabled': 'TTS is enabled automatically',
        'getAPIkey': 'Get the API key or documentation',
        'getModelsList': 'Get models list',
        "defaultTTS": "Default TTS",
        "addNewTTS": "Add New TTS",
        "ttsName": "Sound name (You can choose or fill in manually)",
        "VRMAactionDeleted":"VRMA action deleted",
        'LockWindow':'Window unlocked (click to lock)',
        'UnlockWindow':'Window locked (click to unlock)',
        'gotoComfyui':'If you want to use comfyui, please click to jump',
        'ComfyuiInterface':'Comfyui Interface',
        'text2imgNotice':'The free Pollinations are used by default, and you can switch to the OpenAI Like interface to use other models.',
        'translateAndMark':'Translate and Label Voice',
        "Narrator":"Narrator",
        'CharacterMemory': 'Character Memory',
        'role': 'Role',
        'CharacterVoice': 'Character Voice',
        'CharacterAppearance': 'Character Appearance',
        "noNet":' (FREE! Cannot be accessed from the Chinese network environment; unstable!)',
        'addNewAppearance': 'Add New Appearance',
        'AppearanceName': 'Appearance Name',
        'localConfigNotice02':'Click the key button to jump to the API key acquisition webpage or github related documentation',
        'localConfigNotice03':'Click the magnifying glass button to get the model list. If you do not get it, it may be that the provider does not support getting the model list (such as Volcano Engine), or the API key is incorrect',
        'modelConfigDescription':'Model configuration description',
        'vrmAPI':'VRM Table Pet API',
        'vrmAPIInstructions':'Copy the url below to the screen recording software, such as the browser source of OBS, to transparently embed the table pet into your video',
        'downloadAudio':'Download Audio',
        'convertAudioOnly':'Convert Audio Only',
        'contentTooLong':'The content is too long; please send it in sections. Only the first 10,000 characters were kept.',
        'stop_all_tts':'Stop All TTS',
        "processingProgress": "Processing progress",
        "waiting": "Waiting...",
        'goToMemoryNotice1':"Character cards are optional. If you need to use a character card, please click to jump to",
        'copyProvider':'Copy Provider',
        'goToMainAgentNotice1':"When you select an agent snapshot, all configurations will be locked to the configuration when the agent snapshot was created until you switch back to the default agent. To create an agent snapshot, click Jump to",
        'MainAgentInterface':'Agent snapshot interface',
        'prevPage': 'Previous Page',
        'nextPage': 'Next Page',
        'nextChapter': 'Next Chapter',
        'prevChapter': 'Previous Chapter',
        'audioConversionStopped': 'Audio conversion stopped',
        'audioMergeFailed': 'Audio merge failed',
        'audioDownloadStarted': 'Audio download started',
        'audioDownloadFailed': 'Audio download failed',
        'noValidAudioChunks': 'No valid audio chunks',
        'noAudioToDownload': 'No audio to download',
        'TTSInterface': 'TTS Interface',
        'SeparatorNotice': '1. If interacting with AI voice during playback, the playback will automatically pause and resume once the response ends.',
        'SeparatorNotice1': '2. To adjust voice synthesis settings, click to proceed',
        'audioConversionCompleted': 'Audio conversion completed',
        'CharacterBehavior': 'Character Behavior',
        'AutoBehavior': 'Autonomous behavior',
        'addNewBehavior': 'Add New Behavior',
        'behaviorConfig': 'Behavior Configuration',
        'longTermMemoryNote':'Long-term memory is optional. The number of long-term memory results controls the maximum number of relevant memories that the model can see each time. If you need to use long-term memory, please add a character card in the [Add New Character Card] interface and enable it in the Long-term Memory tab',
        'addBehavior': 'Add Behavior',
        'behavior': 'Behavior',
        'triggerType': 'Trigger Type',
        'time': 'Time',
        'noInput': 'No Input',
        'selectTime': 'Select Time',
        'repeatDays': 'Repeat Days(If not selected, it will not be repeated)',
        'monday': 'Monday',
        'tuesday': 'Tuesday',
        'wednesday': 'Wednesday',
        'thursday': 'Thursday',
        'friday': 'Friday',
        'saturday': 'Saturday',
        'sunday': 'Sunday',
        'noInputLatency': 'Idle delay (in seconds)',
        'actionType': 'Action Type',
        'prompt': 'Prompt',
        'promptPlaceholder': 'Please enter prompt content. For example: "Please find a topic and say something that will make users willing to chat with you."',
        'removeBehavior': 'Remove Behavior',
        'promptAction': 'Prompt (when the action is triggered, the following prompt word will be sent to the model to make it react) ',
        "noInputName": "Idle behavior",
        "timeName": "Timed behavior",
        "cycleName": "Cycle behavior",
        'allNoBriefly': 'Display reasoning and tool info',
        'allBriefly': 'Automatically hide reasoning and tool info',
        'enableAutoBehavior': 'Enable Auto Behavior',
        'deleteBehaviorSuccess': 'Delete Behavior Success',
        'randomEvent': 'Random Event',
        'eventType': 'Event Type',
        'random': 'Random',
        'order': 'Order',
        'triggerNewRandomEvent1': 'Triggered a random event:【',
        'triggerNewRandomEvent2': '】Please continue the conversation based on the event.',
        'cycleValue': 'Cycle Value',
        'repeatNumber': 'Repeat Number',
        'isInfiniteLoop': 'Is Infinite Loop',
        'addNewBehaviorChain': 'Add New Behavior Chain(Construction 🚧)',
        'autoBehaviorTool': 'Autonomous Behavior Tool', 
        'enableAutoBehaviorTool': 'Enable Autonomous Behavior Tool',
        'autoBehaviorToolNotice': "The Autonomous Behavior Tool lets models add automated behaviors, such as reminders or scheduled actions. Behaviors can be timed (like alarms), idle (triggered after inactivity), or periodic (repeating at intervals).",
        'deleteAllBehaviorSuccess': 'Delete All Behavior Success',
        'removeAllBehavior': 'Remove All Behavior',
        'resetBehavior': 'Reset Behavior',
        'minimal': 'Minimal',
        'none': 'None',
        'modifyAndSend': 'Modify and Send',
        'next': 'Next',
        'prev': 'Prev',
        'done': 'Done',
        'guide.model-config': 'Please click on [Model]',
        'guide.model-config-notice': 'Click on the [Model] interface to configure your first model',
        'guide.add-provider-card': 'Click on [Add New Provider]',
        'guide.add-provider-card-notice': 'Click on the [Add New Provider] button to add your first provider',
        'guide.show-Add-Dialog':'Select a provider',
        'guide.show-Add-Dialog-notice':'Select any provider and its corresponding provider URL will be automatically filled in.',
        'guide.confirm-Add-Provider-Button':'Click to confirm',
        'guide.get-API-key':'Click the key button',
        'guide.get-API-key-notice':'Click the key button to jump to the API key acquisition page or GitHub documentation.',
        'guide.get-Models-List':'Click the magnifying glass button',
        'guide.get-Models-List-notice':"Click the magnifying glass button to obtain a model list. If you don't get a list, it may be because the provider doesn't support getting a model list (for example, Volcano Engine), or the API key is incorrect.",
        'guide.model-Id':'Select a model',
        'guide.model-Id-notice':'Select a model or manually enter the model ID.',
        'guide.input-api-Key':'Enter API Key',
        'guide.driver-guide-btn':'Welcome to Super Agent Party!',
        'guide.driver-guide-btn-notice':'Clicking this button will restart the tutorial. Click Next to view the tutorial. It is not recommended to expose this service to the public internet, as there is a risk of API key leakage!',
        'guide.input-api-Key-notice': 'Local inference providers such as ollama/vllm do not need to fill in the API key, other providers do.',
        'mcpCreationFailed': 'MCP creation failed and has been automatically disabled. Please click the reconnect button or modify and try again',
        'inputMethod': 'Input Method',
        'jsonInput': 'JSON Input',
        'formInput': 'Form Input',
        'mcpName': 'MCP Name',
        'mcpURL': 'MCP URL',
        'mcpApiKey': 'MCP API Key',
        'mcpCommand': 'MCP Command',
        'mcpArgs': 'MCP parameters (each parameter occupies one line)',
        'mcpEnv': 'MCP environment variables (format: key=value, each variable occupies one line)',
        'mcpApiKeyPlaceholder': 'Enter the MCP API Key. Optional.',
        'mcpCommandPlaceholder': 'Enter the MCP command, for example: uvx or npx',
        'mcpArgsPlaceholder': 'arg1\narg2\narg3',
        'mcpEnvPlaceholder': 'key1=value1\nkey2=value2\nkey3=value3',
        'moreMCP': 'More MCP',
        'MCPvendor.MCP': 'MCP Official Collection',
        'MCPvendor.awesome': 'Awesome MCP Repository',
        'MCPvendor.docker': 'Docker MCP Hub',
        'tools': 'Tools',
        'clickToolInfo': 'Click on the tool tag to view detailed information',
        'vmcSettings': 'VMC Protocol Settings',
        'vmcReceiveEnable': 'Enable Receive (UDP)',
        'vmcReceivePort': 'Receive Port',
        'vmcSendEnable': 'Enable Send (UDP)',
        'vmcSendHost': 'Target Host',
        'vmcSendPort': 'Target Port',
        'syncExpression': 'Sync Expression',
        'vmcp': 'VMC Protocol',
        'vmcpInstructions':'VMC (Virtual Motion Capture) protocol is an open communication protocol based on UDP network. Its core purpose is to transmit real-time human motion capture data from one device (sender) to another device or software (receiver), thereby achieving real-time driving of virtual characters (such as VTuber avatars). The agent party supports bidirectional VMC protocol, with the default port for the receiving end at 39539 and the default port for the sending end at 39540. You can modify it on the table pet interface.',
        'vmcpReceive':'VMC Receive',
        'vmcpSend':'VMC Send',
        'SampleText': 'Sample Text',
        'SampleTextPlaceholder': 'Please enter sample text for testing voice',
        'ClickToListen': 'Click to Listen',
        'total_tokens': 'Total tokens',
        'first_token_latency': 'First token latency',
        'elapsedTime': 'Elapsed time',
        'EnterXR': 'Enter XR',
        'first_sentence_latency': 'First sentence latency',
        'TTSelapsedTime': 'TTS elapsed time',
        'moreButton': 'More',
        'brieflyButton': 'Hide Reasoning and Tools',
        'expandButton': 'Expand Input Box',
        'fileButton': 'Upload File',
        'imageButton': 'Upload Image',
        'reasonerButton': 'Reasoning',
        'deepSearchButton': 'Deep Research',
        'visionButton': 'Vision',
        'desktopVisionButton': 'Desktop Vision',
        'text2imgButton': 'Text to Image',
        'asrButton': 'Speech Recognition',
        'ttsButton': 'Text to Speech',
        'knowledgeBaseButton': 'Knowledge Base',
        'webSearchButton': 'Web Search',
        'memoryButton': 'Character card',
        'codeButton': 'Code Tools',
        'stickerButton': 'Stickers',
        'haButton': 'Smart Home',
        'chromeButton': 'Browser Control',
        'agentButton': 'Agent Tools',
        'llmButton': 'LLM Tools',
        'mcpButton': 'MCP',
        'a2aButton': 'A2A',
        'httpButton': 'HTTP Tools',
        'comfyuiButton': 'ComfyUI Tools',
        'vrmButton': 'VRM Desktop Companion',
        'behaviorBotton': 'Autonomous Behavior',
        'showMoreButton': 'Show More Button',
        'desktopVision': 'Desktop Vision (the main model needs vision capabilities, or a vision model must be enabled.)',
        'maximize': 'Maximize',
        'minimize': 'Minimize',
        'close': 'Close',
        'assistantMode': 'Assistant Mode',
        'tutorial': 'Tutorial',
        'screenshot': 'Screenshot',
        'screenshotButton': 'Screenshot',
        'fixedWindow': 'Fixed Window',
        'unfixedWindow': 'Unfixed Window',
        'CapsuleMode': 'Capsule Mode',
        'keyTriggered': 'Key Triggered',
        'listening': 'Listening',
        'typing': 'Typing',
        'speaking': 'Speaking',
        'idle': 'Idle',
        'ExitFirstPerson': 'Exit First Person',
        'EnterFirstPerson': 'Enter first-person (WASD+QE, ESC to exit)',
        'addSystemPrompt': 'Add System Prompt',
        'editSystemPrompt': 'Edit System Prompt',
        'promptName': 'Prompt Name',
        'promptContent': 'Prompt Content',
        'usePrompt': 'Use Prompt',
        'moreSystemPrompt': 'More System Prompts',
        'prompt.awesome': 'awesome-chatgpt-prompts',
        'prompt.aiTool': 'system-prompts-and-models-of-ai-tools',
        'prompt.leaked': 'GPTs',
        'moreCard': 'More Character Cards',
        'card.chub': 'Chub',
        'card.janitorai': 'JanitorAI',
        'card.pygmalion': 'Pygmalion',
        'selectGaussScene': 'Select Gauss Scene',
        'defaultScenes': 'Default Scenes',
        'userScenes': 'User Scenes',
        'transparentBackground': 'Transparent Background',
        'addGaussScene': 'Add Gauss Scene',
        'uploadGaussScene': 'Upload Gauss Scene',
        'clickOrDropGaussScene': 'Click or Drop Gauss Scene',
        'GaussSceneDisplayName': 'Gauss Scene Display Name',
        'GaussSceneDisplayNamePlaceholder': 'Please enter the display name of the Gauss scene',
        'translateBot': 'Translate Bot',
        'targetLang': 'Target Language(Can be filled in manually)',
        'targetLangPlaceholder': 'Please enter or select target language',
        'sourceTextPlaceholder': 'Please enter text',
        'translatedTextPlaceholder': 'Translation result',
        "stop": "Stop",
        "clear": "Clear",
        'quickGenPlaceholder': 'Enter a character concept, e.g.: a tsundere catgirl living in a magical forest',
        'oneClickGenCard': 'One-click generate',
        'oneClickExpand': 'One-click expand',
        'noSystemPromptToExtend': 'No system prompt to extend',
        'stopGenCard': 'Stop generating',
        'genSuccess': 'Generation successful',
        'genFailed': 'Generation failed',
        'startGen': 'Generation has started, running in the background without affecting other functions',
        'quickGenSystemPromptPlaceholder': 'Enter a simple system prompt, e.g., Mermaid diagram assistant',
        'AIgening': 'AI is generating',
        'CLItool': 'CLI Tools',
        'CLIEngine': 'CLI Engine',
        'workspace': 'Workspace',
        'webNotSupported': 'Browser mode not supported, desktop only',
        'scriptExecuting': 'Script is executing; please check the terminal and add the configuration. After configuration is complete, you need to restart the agent party to obtain the latest configuration.',
        'installClaudeCode': 'Install Claude Code',
        'ClaudeCodeNotice1': 'If you have not yet installed Claude Code, click the button below to install it, or enter `bash -c "$(curl -fsSL http://127.0.0.1:3456/sh/claude_code_install.sh)"` directly in the terminal to install and configure a custom API address and API Key. Note! You must configure Claude Code to support the Anthropic interface type, otherwise an error will occur. If you reconfigure Claude Code, you need to restart the agent party to obtain the latest configuration. Not all vendor APIs may be available; please configure according to the vendor\'s API documentation.',
        'browserEmbedCodeInstructions': 'Browser embed code, you can embed the following code into your webpage to enable the embedding feature',
        'extraParamsNotice': 'Since different service providers may offer different parameters, which might go beyond the standard OpenAI format, you may need to manually add these parameters to suit your provider. If you are unsure how to adjust these parameters, you can refer to the provider’s API documentation. Common parameters include, for example, `enable_thinking`, which in some provider interfaces can control whether certain models enable reasoning.',
        'enabledCCconfig': 'Custom configuration enabled (uses system environment variables when disabled, uses the model you provided below when enabled)',
        'anthropicSupportedProviders': 'Configure a vendor that supports Anthropic: "Anthropic", "Deepseek", "SiliconFlow", "Zhipu AI", "Moonshot AI", "Alibaba Cloud Bailian", "ModelScope"',
        'selectSystemPrompt': 'Select System Prompt',  
        'selectSystemPromptPlaceholder': 'Please select a system prompt',   
        'QwenCodeNotice1': 'It is recommended to install using `npm install -g @qwen-code/qwen-code`',
        'installQwenCode': 'Install Qwen Code',
        'enableDesktopVisionWakeWord': 'Enable Desktop Vision Wake Word (takes effect when Desktop Vision is turned on)',
        'desktopVisionWakeWord': 'Desktop Vision Wake Word (multiple allowed, one per line)',  
        'desktopVisionWakeWordPlaceholder': 'Please enter desktop vision wake word(s) (multiple allowed, one per line)',
        'FullTextReading': 'Full Text Reading',
        'SegmentedReading': 'Segmented Reading',
        'segmentRead': 'Segmented Reading',
        'playNextSegment': 'Play Next Segment',
        'continuousPlay': 'Continuous Play',
        'stopSegmentTTS': 'Stop Segmented Reading',
        'noSegmentList': 'No Segment List',
        'NotExtension': 'No extension plugin; the extension panel defaults to rendering the last message',
        "defaultView": "Default View",
        "defaultViewDescription": "Show extension selection interface",
        "availableExtensions": "Available Extensions",
        "noExtensionsFound": "No extensions found",
        "noContent": "No content to display",
        "switchExtension": "Switch Extension",
        "extension": "Extension",
        'clickToSwitchExtensions': 'Click to switch extensions',
        'clickToSwitchModel': 'Click to switch models',
        'useExtension': 'Use Extension',
        'addNewExtension': 'Add New Extension',
        'waitExtensionInstall': 'Please wait for the extension to install. The extension page will automatically refresh upon successful installation.',
        'deleteSuccess': 'Deleted successfully',
        'deleteFailed': 'Failed to delete',
        'extensionExists': 'Extension already exists',
        'or': 'or',
        'localZipInstallation': 'Local Zip Installation',
        'goToExtensionPage': 'Go to the extensions page to add',
        'getPlugin': 'to view the plugin list',
        'refreshList': 'Refresh the list',
        'getPluginInstructions': 'Please enter the GitHub repository URL of the plugin or directly upload a local zip package.',
        'install': 'Install',
        'uninstall': 'Uninstall',
        'refreshedSuccess': 'Refreshed successfully',
        'refreshedFailed': 'Failed to refresh',
        'viewRepository': 'View Repository',
        'addYourExtensions': 'Add Your Extensions',
        'openInWindow': 'Open in Window',
        'openInBrowser': 'Open in Browser',
        'sherpa-onnx-sense-voice': 'Sherpa ONNX (need locally downloaded)',
        'edgetts': 'Edge TTS (Free, requires internet connection)',
        "modelExist": "Model already exists and can be used directly",
        "downloadFromModelScope": "Download from ModelScope",
        "downloadFromHuggingFace": "Download from Hugging Face",
        "deleteModel": "Delete Model",
        "Re-download": "Re-download",
        'installed': 'Installed',
        'notInstalled': 'Not Installed',
        'thinking': 'Thinking...',
        'update': 'Update',
        'downloadNodeJS': 'Download NodeJS',
        'webBilibiliInfo': 'The web mode is for learning and communication purposes only. Please do not use it in actual production environments. For long-term stable usage, please select the Bilibili Live Open Platform mode as the live streaming type.',
        'QQbotNotice':'Before use, please read the AIGC QQ Bot Access Guidelines',
        'loadExtension(node)': 'Load Extension (Node.js)',
        'loadExtension(static)': 'Load Extension (Static)',
        'waitForLoadingExt': 'Please wait for the extension to load',
        'agplNotice': 'For the full AGPL license text, please visit:',
        'thirdPartyNotice': 'Third-party license list:',
        'vectorInteractTitle': 'Edit Memory Bank',
        'memoryText': 'Memory Text',
        'createTime': 'Created Time',
        'lastTime': 'Modified Time',
        'vectorInteract': 'Edit Memory',
        'downloadMemory': 'Download Character Card',
        'editMemory': 'Edit Character Card',
        'removeMemory': 'Remove Character Card',
        'sqlControl': 'Database Tool',
        'downloadUV': 'Download uv',
        'sqlEngine': 'SQL Engine',
        'DB_URL': 'Database URL',
        'sqlNotice1': '1. Before using, you need to have uv installed on your computer. You can download and install it from `https://docs.astral.sh/uv/getting-started/installation/`',
        'sqlNotice2': '2. After filling in the corresponding database information, you can start it. For SQLite databases, you need to enter the absolute path of the `.db` file.',
        'success_start_sqlControl': 'Started successfully',
        'error_start_sqlControl': 'Failed to start',
        'success_stop_sqlControl': 'Stopped successfully',
        'error_stop_sqlControl': 'Failed to stop',
        'user': 'User',
        'host': 'Host',
        'port': 'Port',
        'password': 'Password',
        'dbname': 'Database',
        'dbpath': 'Database Path',
        'sqlButton': 'Database',
        'nodeInstalled': 'NodeJS has been installed',
        'nodeNotInstalled': 'NodeJS has not been installed',
        'installNode': 'Install NodeJS',
        'uvInstalled': 'uv has been installed',
        'uvNotInstalled': 'uv has not been installed',
        'installUv': 'Install uv',
        'gitInstalled': 'Git is installed and can be used to install extensions',
        'gitNotInstalled': 'Git has not been installed',
        'installGit': 'Install Git',
        'resume_read': 'Resume Reading',
        'pause_read': 'Pause Reading',
        'development': 'Development Tools',
        'getYouTubeAPIKey': 'Get YouTube API Key',
        'youtubeVideoId': 'YouTube Video ID',
        'youtubeVideoIdPlaceholder': 'Enter the video ID from the YouTube video URL, typically the content following `v=`',
        'youtubeApiKey': 'YouTube API Key',
        'youtubeApiKeyPlaceholder': 'Enter your Google API Key',
        'youtubeHelp': '1. Copy the video ID from the URL → 2. Fill in the above two fields to begin monitoring chat messages',
        'liveConfig': 'Live Configuration',
        'bilibiliLive': 'Bilibili Live',
        'youtubeLive': 'YouTube Live',
        'twitchLive': 'Twitch Live',
        'getTwitchAccessToken': 'Get Twitch Access Token for the bot account',
        'twitchChannel': 'Twitch Channel',
        'twitchChannelPlaceholder': 'Enter the Twitch channel name of the streaming account',
        'twitchAccessToken': 'Twitch Access Token',
        'twitchAccessTokenPlaceholder': 'Enter your Twitch Access Token',
        'twitchHelp': '1. Click the Access Token link above, log in with your bot account → 2. Copy the generated Token → 3. Fill in the Twitch Access Token for the bot account and the Twitch channel name of the streaming account → 4. Start listening to chat messages',
        'feishuBot': 'Feishu Bot',
        'feishu_bot_config': 'Feishu Bot Configuration',
        'gotoFeishuBot': 'Go to Feishu Open Platform to Create a Bot',
        'feishuBotNotice': 'Three-Minute Quick Guide to Creating a Bot',
        'enableTTS': 'Enable TTS',
        'discordBot': 'Discord Bot',
        "discord_bot_config": "Discord Bot",
        "gotoDiscordBot": "Go to Discord Developer",
        "discordBotNotice": "Official Documentation",
        "enterDiscordBotToken": "Please enter Discord Bot Token",
        "danmakuWakeWord": "Danmaku Wake Word",
        "wakeWordPlaceholder": "If left blank, no wake word is used. If filled in, only danmaku containing this word will trigger a response",
        'openaiStyleAPIKey': 'API key (this interface has no key verification, similar to ollama, the key can be filled in casually)',
        "telegram_bot_config": "Telegram Bot Configuration",
        "gotoTelegramBot": "Go to BotFather",
        "telegramBotNotice": "API Documentation",
        "enterTelegramBotToken": "Please enter the Token obtained from BotFather",
        'telegramBot': 'Telegram Bot',
        "systemProxy": "Follow system proxy",
        "manualProxy": "Manual proxy",
        "noneProxy": "No proxy",
        'logoPage':'Logo Page',
        'openaiStream': 'Enable Stream Mode',
        'customStream': 'Enable Stream mode (some models do not support)',
        'serverLogs': 'Server Logs',
        'refresh': 'Refresh',
        'EmbeddingFailed': 'Embedding model type mismatch or unable to invoke normally',
        'minilmNotice': 'Before enabling the knowledge base feature, you must configure a word embedding model. A lightweight local model is provided below. You can click to download it locally, or choose another word embedding model you have added in the model service.',
        'minilmNotice2': 'The first option is a local word embedding model that requires manual download. Please download it on this interface.',
        'minilmNotice3': 'Before enabling the character long-term memory feature, you must configure a word embedding model. A lightweight local model is provided below. You can click to download it locally, or choose another word embedding model you have added in the model service.',
        'minilmNotice4': 'The second option is a local word embedding model that requires you to manually click to download. Please download it on this interface.',
        'goToMemory': 'Go to the character card config interface',
        'systemtts': 'System TTS (average effect, extremely fast, suitable for testing)',
        'Timbre': 'Timbre',
        'sysTTS': 'System TTS',
        'noSystemVoiceDetected': 'No system voice detected',
    },
  };
