/**
 * English (en) locale strings for the Data Pipelines module.
 *
 * Keys are looked up by UI components elsewhere in the project, so existing
 * keys must never be renamed or removed — misspelled keys are kept and a
 * correctly spelled alias is added alongside (see `executionStatus.waiting`
 * and `operatorTip` below).
 *
 * NOTE: some values intentionally keep a trailing space (e.g. `toSel`,
 * `toInput`) because they are used as prefixes concatenated with a field name.
 */
export const dataPipelines = {
  "toSel": "Please select ",
  "toInput": "Please Input ",
  "noData": "No data available.",
  "saveConfiguration": "Save Configuration",
  "dataAcquisition": "Data Acquisition",
  "dataCollectionTask": "Data Collection Task",
  "dataSourceManagement": "Data Source Management",
  "formatConversion": "Format Conversion",
  "dataFormatConversion": "Data Format Conversion",
  "taskDescription": "Task Description",
  "sourceFormat": "Source Format",
  "targetFormat": "Target Format",
  "dataFlowBranch": "Data Flow Branch",
  "startExecution": "Start Execution",
  "inProgress": "In Progress",
  "searchTaskName": "Search Task Name",
  "confirmTermination": "Confirm Termination",
  "terminate": "Terminate",
  "waiting": "Waiting",
  "error": "Error",
  "taskStatus": "Task Status",
  "labelStudio": "Data Labeling",

  // Titles/descriptions for the supported data source connectors.
  "dataSourceInfo": {
    "Mysql": {
      "title": "Relational Database (MySQL)",
      "desc": "Batch import database tables with custom tables and fields"
    },
    "Mongodb": {
      "title": "NoSQL Database (MongoDB)",
      "desc": "Import non-relational data with collection/field selection and schema conversion"
    },
    "File": {
      "title": "File Data Import",
      "desc": "Supports CSV, Excel, JSON and various file formats"
    },
    "Hive": {
      "title": "Hive System Import",
      "desc": "Efficiently read data stored in Hive systems"
    }
  },
  "testingConnection": "Testing connection",
  "submitting": "Submitting",
  "pleaseSelectAnExecutionTime": "Please select an execution time",
  "deletingTask": "Deleting task",
  "terminatingTask": "Terminating task",
  "createTask": "Create Task",
  "addDataSource": "Add Data Source",
  "fileFormat": "File Format",
  "connectionStatus": "Connection Status",
  "dataSourceType": "Data Source Type",
  "searchDataSources": "Search Data Sources",
  // Fixed: value previously said only "Search name", contradicting the key.
  "searchNameOrDescription": "Search name or description",
  "dataProcessing": "Data Processing",
  "dataProcessingConfiguration": "Data Processing Configuration",
  "taskFlowConfiguration": "Task Flow Configuration",
  "dataExportConfiguration": "Data Export Configuration",
  "taskExecuted": "Task Executed",
  "taskExecutionFailed": "Task Execution Failed",
  "cannotCancel": "Task processing in progress, cannot cancel",
  "taskSuccessStop": "Task successfully stopped",
  "taskStopFailed": "Task stop failed",
  "processingResult": "Processing Result",
  "algorithmTemplate": "Algorithm Template",
  "builtInTemplate": "Built-in Template",
  "customTemplate": "Custom Template",
  "operatorManagement": "Operator Management",
  "systemDashboard": "System Dashboard",
  "concurrentTaskMonitoring": "Concurrent Task Monitoring",
  "myAlgorithmTemplate": "My Algorithm Template",
  // Fixed: was "CreateAlgorithm Template" (missing space).
  "createAlgorithmTemplate": "Create Algorithm Template",
  "dataProcessingDescription": "Data processing allows users to leverage different model operators to handle data used in LLMs. This includes data cleaning, automatic data augmentation, and analysis, enabling users to obtain higher quality data.",
  "nodeName": "Node Name",
  "nodeConfig": "Node Config",
  "nodeType": "Node Type",
  "nodeNotSelected": "Node not selected",
  "fieldRequired": "{field} is required",
  "noNodesError": "No nodes exist in the workflow",
  "unnamedNode": "Unnamed node({id})",
  "unconnectedNodeError": "Node {nodeName}: not connected to any other nodes",
  "invalidConfigError": "Configuration for node {nodeName} is incomplete, please check required fields",
  "configRequiredError": "The '{configName}' field for node '{nodeName}' is mandatory, please fill it out completely",
  "saveError": "Error saving workflow",
  "noMatchingNodeFound": "No matching node found",
  "searchProcessing": "Search processing task",
  "zoomIn": "Zoom in",
  "zoomOut": "Zoom out",
  "resetView": "Reset view",
  "clearCanvas": "Clear canvas",
  "operationGuide": "Operation Guide",
  "operationGuide1": "Drag nodes from the left to the canvas area on the right",
  "operationGuide2": "Click on the connection point on the node and drag it to another node to create a connection",
  "operationGuide3": "Dragging nodes can adjust their position",
  "operationGuide4": "Hover over the node with the mouse to display the delete button",
  "operationGuide5": "Click the delete button or press the Delete key to delete the selected node",
  "configInfo": "Config Information",
  "search": "Search",
  "loading": "Loading",
  "taskCategories": "Task Categories",
  "allCategories": "All categories",
  "taskList": "Task List",
  "taskName": "Task Name",
  "DatabaseName": "Database Name",
  "ServerAddress": "Server Address",
  "port": "Port Number",
  "username": "Username",
  "password": "Password",
  "authType": "Authentication Type",
  "collectionSourceName": "Collection Source Name",
  "server": "Server",
  "database": "Database",
  "task": "Task",
  "dataSourceDetails": "Data Source Details",
  "close": "Close",
  "operationSuccessful": "Operation Successful",
  "operationFailed": "Operation Failed",
  "basicInformation": "Basic Information",
  "dataSourceName": "Data Source Name",
  "lastUpdate": "Last Update",
  "normal": "Normal",
  "toBeTested": "To be tested",
  "anomaly": "Anomaly",
  "useRecord": "Usage Record",
  "dataImportTask": "Data Import Task",
  // NOTE(review): key says "persons" but value is "task" — presumably a
  // counter unit label (e.g. "N task(s)"); confirm against callers before changing.
  "persons": "task",
  "recentlyUsed": "Recently Used",
  "dataVolume": "Data Volume",
  "total": "Total",
  "startAt": "Started at",
  "done": "Completed",
  "taskRunningHost": "Task Running Host",
  "recordsHaveBeenImported": "Records Imported",
  "totalNumberOfRecords": "Total Number of Records",
  "cancelTask": "Cancel Task",
  "refreshStatus": "Refresh Status",
  "viewLog": "View Log",
  "resourceOccupation": "Resource Occupation",
  "RunItAgain": "Run Again",
  "dataConnectionConfiguration": "Data Connection Configuration",
  "authType_option_NONE": "No identity verification",
  "authType_option_LDAP": "Use LDAP/AD-based user identity verification",
  "authType_option_KERBEROS": "Use Kerberos/GSSAPI for identity verification",
  "authType_placeholder": "Currently only LDAP mode is supported",
  "collectionSourceDesc": "Collection Source Description",
  "testLink": "Test Connection",
  "dataFilteringConfiguration": "Data Filtering Configuration",
  "selectionSet": "Selection Set",
  "searchForTheTableName": "Search Table Name",
  "allFields": "All Fields",
  "selectAll": "Select All",
  "saveTheConfiguration": "Save Configuration",
  "saveAndExecute": "Save and Execute",
  "executeImmediately": "Execute Immediately",
  "selectTheExecutionTime": "Select Execution Time",
  "sure": "Confirm",
  "PleaseSelectTime": "Please Select Time",
  "fileUpload": "File Upload",
  "jumpLink": "Jump Link",
  "linkSuccess": "Connection Successful",
  "linkError": "Connection Failed",
  "connectionInformation": "Connection information",
  "TaskFailed": "Task Failed",
  "manualStop": "Manual stop",
  "public": "Public",
  "private": "Private",

  // Task detail / processing-result view.
  "createTime": "Creation Time",
  "dataAmount": "Data Amount",
  "finishTime": "Completion Time",
  "processedDataAmount": "Processed Data Amount",
  "processInfo": "Processing Details",
  "processStatus": "Running Status",
  "processedData": "Processed Data",
  "graphicDemonstration": "Graphic Demonstration",
  "sessionProcessedResult": "Session Processing Result",
  "index": "Index",
  "preSession": "Before Processing Session",
  "processType": "Processing Method",
  "afterSession": "After Processing Session",
  "taskLog": "Task Log",
  "logName": "Log Name",
  "downloadLog": "Download Log",
  "others": "Others",
  "replace": "Replace",
  "deduplicate": "Deduplicate",
  "remove": "Remove",
  "data_refine": "Data Refinement",
  "Internal": "Internal",
  "data_generation": "Data Generation",
  "data_enhancement": "Data Enhancement",

  // Celery node monitoring.
  "data_source": "Data Source",
  "execution_completed_normally": "Execution completed (normal)",
  "execution_end_error": "Execution ended (error)",
  "stopped": "Stopped",
  "celery_node_service_list": "Celery Node Service List",
  "ip_address": "IP Address",
  "current_number_tasks": "Current Number of Tasks",
  "node_status": "Node Status",
  "heartbeat_time": "Heartbeat Time",

  "taskType": "Task Type",
  "dataCleaning": "Data Cleaning",
  "processingStatus": "Processing Status",
  "processingText": "Processing Text",
  "completed": "Completed",
  "dataSource": "Data Source",
  "dataSourceBranch": "Data Source Branch",
  "dataFlow": "Data Flow",
  "startTime": "Start Time",
  "endTime": "End Time",
  "executionStatus": {
    "success": "Success",
    "error": "Failed",
    "processing": "Processing",
    // "wainting" is a misspelling of "waiting"; it is kept because callers may
    // look it up by this exact key. New code should use "waiting" below.
    "wainting": "Pending",
    "waiting": "Pending",
  },
  "unknown": "Unknown",
  "online": "Online",
  "offline": "Offline",
  "operations": "Operations",
  "delete": "Delete",
  "deleteConfirm": "Confirm Delete",
  "cancelConfirm": "Confirm Cancellation",
  "rerunItConfirm": "Confirm Rerun",
  "execute": "Execute",
  "cancelExecute": "Cancel Execution",
  "executeConfirm": "Confirm Execution",
  "confirm": "Confirm",
  // Fixed: was "Replace" (copy-paste of the `replace` entry); key means Reset.
  "reset": "Reset",
  "details": "Details",
  // Fixed: capitalized for consistency with the other action labels.
  "authorize": "Authorize",
  "settings": "Settings",
  "operatorAuthorization": "Operator Authorization",
  "SearchUserName": "Search User Name",
  "SearchOrganizationName": "Search Organization Name",
  "person": "person",
  "organization": "organization",
  "selected": "Selected",
  "editIcon": "Edit Icon",
  "iconPreview": "Icon Preview",
  "please": "Please",
  "uploadTips1": "Support JPG and PNG format icons, up to 10MB",
  "uploadTips2": "A new icon has been uploaded, you can continue to upload and replace or click OK to save.",
  "uploadIcon": "Upload Icon",
  "uploadStatusTips1": "Click or drag the icon here",
  "uploadStatusTips2": "The current icon will be replaced after uploading",
  "uploading": "Uploading",
  "uploadSuccess": "Upload Success",
  "uploadSuccessTips1": "You can continue to upload replacements or click OK to save.",
  "reUpload": "Re-upload",
  "uploadFailed": "Upload Failed",
  "retry": "Retry",
  "uploadSuccessTips2": "Icon uploaded successfully",
  "uploadFailedTips1": "Please upload an icon in JPG or PNG format",
  "uploadFailedTips2": "The icon size cannot exceed 10MB.",
  "uploadFailedTips3": "Upload failed, please try again",
  "networkError": "Network error, please check the connection and try again",
  "algorithmTemplateDescription": "The algorithm template allows users to build workflows using various model operators, enabling tasks such as data cleaning, automated data augmentation, and analysis.",
  "taskTemplate": "Task Template",
  "searchTemplate": "Search Template",
  "templateName": "Template Name",
  "templateDescription": "Template Description",
  "searchTaskTemplate": "Search Task Template",
  "searchOperator": "Search Operator",
  "nextStep": "Next Step",
  "previousStep": "Previous Step",
  "create": "Create",
  "edit": "Edit",
  "type": "Type",
  "copy": "Copy",
  "use": "Use",
  "templateList": "Template List",
  "createTemplate": "Create Template",
  "editTemplate": "Edit Template",
  "general": "General",
  "dataCleaningDescription": "Clean data using various operators such as deduplication and desensitization, ensuring it meets the required standards for use.",
  "dataAugmentation": "Data Augmentation",
  "dataAugmentationDescription": "Automatically generate more data from seed data for training, with support for custom parameters and prompts.",
  "textClassification": "Text Classification",
  "textClassificationDescription": "Augment training data for text classification tasks, applicable to scenarios such as sentiment classification, tag classification, and product classification.",
  "textExtraction": "Text Extraction",
  "textExtractionDescription": "Augment training data for text extraction tasks, applicable to scenarios such as specific format extraction, entity extraction, and key element extraction.",
  "textGeneration": "Text Generation",
  "textGenerationDescription": "Augment training data for text generation tasks, applicable to scenarios such as news writing, ad copy generation, and content stylization.",
  "apply": "Apply",
  "newTask": "New Task",
  "pushToOriginalDataset": "Push to original dataset",
  "pushToOriginalDatasetDescription": "Once data cleaning is completed, it will be pushed to the original dataset repo as a new submission",
  "pushToNewDataset": "Push to new dataset",
  "pushToSelectedDatasetDescription": "Once data cleaning is completed, it will be pushed to the selected dataset",
  "targetDataset": "Target dataset name",
  "predefinedOperatorSelection": "Predefined Operator Selection",
  "predefinedOperator": "Predefined Operator",
  // "peratorTip" is a misspelling of "operatorTip"; kept for existing callers.
  // New code should use "operatorTip" below.
  "peratorTip": "Currently supports multiple predefined operators of types Mapper, Filter, and Deduplicator",
  "operatorTip": "Currently supports multiple predefined operators of types Mapper, Filter, and Deduplicator",
  "publishAsNewTemplate": "Publish as New Template",
  "executionOrder": "Execution Order",
  "enableOrNot": "Enable or Not",
  "addOperator": "Add Operator",
  "operatorType": "Operator Type",
  "operatorName": "Operator Name",
  "textNormalization": "Text Normalization",
  "removeSpecialContent": "Remove Special Content",
  "maskSensitiveInformation": "Mask Sensitive Information",
  "specialCharacterRatioFiltering": "Special Character Ratio Filtering",
  "sensitiveWordFiltering": "Sensitive Word Filtering",
  "nGramRepetitionRatioFiltering": "N-Gram Repetition Ratio Filtering",
  "lengthFiltering": "Length Filtering",
  "md5Deduplication": "MD5 Deduplication",
  "articleSimilarityDeduplication": "Article Similarity Deduplication",
  "toxicityRemoval": "Toxicity Removal",
  "operatorConfiguration": "Operator Configuration",
  "unicodeTextNormalization": "Unicode text normalization",
  "convertTraditionalChineseToSimplifiedChinese": "Convert Traditional Chinese to Simplified Chinese",
  "removeURLLinks": "Remove URL Links",
  "removeInvisibleCharacters": "Remove invisible characters",
  "removeHtmlTagsAndParseHtmlContent": "Remove html tags and parse html content",
  "maximumRatio": "Maximum ratio",
  "lengthN": "Length N",
  "minimumLength": "Minimum length",
  "characters": "Characters",
  "windowLength": "Window Length",
  "description": "Description",
  "textNormalizationDesc": "Unicode text normalization and conversion from Traditional Chinese to Simplified Chinese",
  "removeSpecialContentDesc": "Remove special content from the text, such as URLs, invisible characters, and html tags.",
  "maskSensitiveInformationDesc": "Mask sensitive information, such as replacing email addresses with [EMAIL], phone numbers with [TELEPHONE] or [MOBILEPHONE], and ID numbers with [IDNUM].",
  "specialCharacterRatioFilteringDesc": "Filter text based on the ratio of special characters. Keep samples where the number of special characters does not exceed a set threshold of the total text length. Special characters include punctuation marks, numbers, spaces, and emojis. Samples exceeding the set ratio will be filtered out.",
  "sensitiveWordFilteringDesc": "Filter out samples containing sensitive words.",
  "nGramRepetitionRatioFilteringDesc": "Keep samples where the character-level N-Gram repetition ratio does not exceed the set threshold. Samples exceeding the threshold will be filtered out.",
  "lengthFilteringDesc": "Filter data based on text length. Samples outside the length range will be filtered out.",
  "md5DeduplicationDesc": "Deduplicate samples by comparing their MD5 hash. Samples with matching MD5 values will be filtered out.",
  "articleSimilarityDeduplicationDesc": "Use the SimHash algorithm to calculate text similarity. Samples exceeding the similarity threshold will be filtered out.",
  "toxicityRemovalDesc": "Automatically detect, analyze, and remove sensitive or non-compliant content from the data. This operator only analyzes and processes the content and does not save or keep any data before or after processing.",
  "previewBefore": "Preview (Before Cleaning)",
  "previewAfter": "Preview (After Cleaning)",
  "creationCompleted": "Creation Completed",
  "updateTemplate": "Update Template",
  "cancel": "Cancel",
  "templateNameExists": "Template name already exists, please use a different name",
  "Queued": "Queued",
  "Processing": "Processing",
  "Finished": "Finished",
  "Failed": "Failed",
  "Timeout": "Timeout",
  "Canceled": "Canceled",
  "sessionDel": "Session has been deleted",

  // Tool pool.
  "toolsTit": "Tool Pool",
  "toolsDec": "The Dataflow Tool Pool is a one-stop, multi-modal data processing system that enhances data quality, increases value, and makes it more suitable for large model processing.",
  "toolsSearch": "Search Tools",
  "toolsType": "Tool Categories",
  "toolsName": "Tool Name",
  "toolsUse": "Use Tool",
  "taskType1": "Operator",
  "taskType2": "Tool",
  "log": "Log",
  "toolsTab1": "Internal Tools",
  "toolsTab2": "External Tools",

  // Internal tool display names (keyed by internal tool identifiers).
  "analysis_common_internal": "analysis common",
  "dataset_spliter_by_language_preprocess_internal": "dataset spliter by language preprocess",
  "prepare_dataset_from_repo_preprocess_internal": "prepare dataset from repo preprocess",
  "raw_alpaca_cot_merge_add_meta_preprocess_internal": "raw alpaca cot merge add meta preprocess",
  "raw_arxiv_to_jsonl_preprocess_internal": "raw arxiv to jsonl preprocess",
  "raw_stackexchange_to_jsonl_preprocess_internal": "raw stackexchange to jsonl preprocess",
  "reformat_csv_nan_value_preprocess_internal": "reformat csv nan value preprocess",
  "reformat_jsonl_nan_value_preprocess_internal": "reformat jsonl nan value preprocess",
  "serialize_meta_preprocess_internal": "serialize meta preprocess",
  "count_token_postprocess_internal": "count token postprocess",
  "data_mixture_postprocess_internal": "data mixture postprocess",
  "deserialize_meta_postprocess_internal": "deserialize meta postprocess",
  "quality_classifier_common_internal": "quality classifier common",

  "opencsg_data_extraction_preprocess_internal": "opencsg data extraction preprocess",
  "opencsg_scrape_url_data_preprocess_internal": "opencsg scrape url data preprocess",
  "fineweb_edu_chinese_common_internal": "fineweb edu chinese common",
  "smoltalk_chinese_common_internal": "smoltalk chinese common",
  "cosmopedia_chinese_preprocess_internal": "cosmopedia chinese preprocess",

  // Internal tool descriptions (same identifiers, "_dec" suffix).
  "analysis_common_internal_dec": "This analyzer class is used to analyze specific datasets. It calculates statistics for all filtering operations in the configuration file, applies various analyses (such as overall analysis, column-by-column analysis, etc.) to these statistics, and generates analysis results (statistical tables, distribution charts, etc.) to help users better understand the input dataset.",
  "dataset_spliter_by_language_preprocess_internal_dec": "Load the dataset from the source directory, then use the operation filter named LanguageIDScoreFilter for language identification, and finally split the dataset by language and save it.",
  "prepare_dataset_from_repo_preprocess_internal_dec": "Prepare datasets from code repositories, including: repository name, file path in the repository, and file content.",
  "raw_alpaca_cot_merge_add_meta_preprocess_internal_dec": "Convert raw Alpaca-Cot data downloaded from Hugging Face into JSONL files, merge instruction/input/output texts, and add metadata information.",
  "raw_arxiv_to_jsonl_preprocess_internal_dec": "Convert raw arXiv data (gzipped tar files) to JSONL format.",
  "raw_stackexchange_to_jsonl_preprocess_internal_dec": "Convert raw Stack Exchange data downloaded from Archive (reference: https://archive.org/download/stackexchange) into multiple JSONL files.",
  "reformat_csv_nan_value_preprocess_internal_dec": "Use Hugging Face to load CSV or TSV files that may contain NaN values, and can be processed by setting additional parameters (such as setting keep_default_na to False).",
  "reformat_jsonl_nan_value_preprocess_internal_dec": "Reformat JSONL files that may contain NaN values. Traverse the JSONL file, find the first object that does not contain NaN as the reference feature type, and set it as the benchmark when loading all JSONL files.",
  "serialize_meta_preprocess_internal_dec": "Serialize all fields in the JSONL file except those specified by the user to ensure that the dataset can still be loaded normally even if the text format of each line in the JSONL file is inconsistent.",
  "count_token_postprocess_internal_dec": "Count the number of tokens for a given dataset and tokenizer. Currently, only JSONL format is supported.",
  "data_mixture_postprocess_internal_dec": "Mix multiple datasets into one dataset. Randomly select samples from each dataset and mix these samples, then export them as a new mixed dataset. Supported formats include: [\"jsonl\", \"json\", \"parquet\"].",
  "deserialize_meta_postprocess_internal_dec": "Deserialize specified fields in the JSONL file.",
  "quality_classifier_common_internal_dec": "This quality classifier class is used to predict the scores of documents in the dataset. It will calculate scores for all rows and provide two columns for each row: score and should_keep, to help users decide which row should be deleted. By default, if the score is higher than 0.9, the row will be marked as should_keep=1.",
  "opencsg_data_extraction_preprocess_internal_dec": "A high-quality tool for converting PDF to Markdown and JSON",
  "opencsg_scrape_url_data_preprocess_internal_dec": "A large language model-based data scraping tool for websites and local documents (XML, HTML, JSON, etc.)",
  "fineweb_edu_chinese_common_internal_dec": "Users can define their own scoring criteria, score the data from the data source based on these criteria, and filter the data. The maximum score is 5.",
  "smoltalk_chinese_common_internal_dec": "Use a fixed system_prompt to generate relevant multi-round dialogues with a large model and score them. Filter the data based on the score specified by the user, and only retain the one with the highest score.",
  "cosmopedia_chinese_preprocess_internal_dec": "A detailed tutorial on converting raw text to WikiHow style using the MakeCosmopediaMapper operator. This tool invokes large language models to generate structured tutorial content based on the input seed text.",
}