#!/usr/bin/env python3

# Copyright © 2025 Wenze Wei
#
# This file is part of Pisces L1.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0).
# You may not use this file except in compliance with the License.
# Commercial use is strictly prohibited.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
import os
from datasets import load_from_disk, Dataset

class DatasetCleaner:
    """Utilities for cleaning the text field of datasets saved in Hugging Face
    `datasets` on-disk format (see `datasets.load_from_disk`)."""

    @staticmethod
    def clean_text(text, min_length=10, keep_pattern=r'[\u4e00-\u9fff，。！？、：“”‘’\d\w\s]'):
        """
        Clean text data.

        :param text: Text to be cleaned.
        :param min_length: Minimum length of the cleaned text. If the length is less than this value, None will be returned. Default is 10.
        :param keep_pattern: Regular expression character class (with or without the enclosing
            ``[...]`` brackets) describing characters to retain. Default includes Chinese
            characters, Chinese punctuation, digits, letters, and whitespace. Pass a falsy
            value to skip the keep-filter entirely.
        :return: Cleaned text. Returns None if the text is invalid or the length after cleaning is less than min_length.
        """
        if not text or not isinstance(text, str):
            return None
        # Remove ASCII/Latin-1 control characters (keeps \t \n \r, which are
        # collapsed into single spaces below anyway).
        text = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x9f]', '', text)
        if keep_pattern:
            # The default keep_pattern is already wrapped in [...]; strip the
            # enclosing brackets before building the negated class. Previously
            # f'[^{keep_pattern}]' produced the nested pattern '[^[...]]', which
            # re parses as "disallowed char followed by a literal ']'" — so the
            # filter silently removed (almost) nothing.
            char_set = keep_pattern
            if char_set.startswith('[') and char_set.endswith(']'):
                char_set = char_set[1:-1]
            # Remove every character not in the keep set
            text = re.sub(f'[^{char_set}]', '', text)
        # Replace runs of whitespace with a single space and trim the ends
        text = re.sub(r'\s+', ' ', text).strip()
        return text if len(text) >= min_length else None

    @staticmethod
    def process_dataset(input_path, output_path, text_field='text', **clean_kwargs):
        """
        Process a single dataset and clean the text field in the dataset.

        :param input_path: Path to the input dataset (a `datasets` save_to_disk directory).
        :param output_path: Path to save the cleaned dataset.
        :param text_field: Name of the text field to be cleaned in the dataset. Default is 'text'.
        :param clean_kwargs: Additional arguments passed to the clean_text method.
        :return: A tuple (retained_count, original_count) of sample counts.
        :raises FileNotFoundError: If input_path does not exist.
        """
        if not os.path.exists(input_path):
            raise FileNotFoundError(f"Dataset path does not exist: {input_path}")

        # Create the parent directory of the output path. dirname() returns ''
        # for a bare filename, and makedirs('') raises — only create when there
        # is an actual parent component.
        parent_dir = os.path.dirname(output_path)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)

        # Load the dataset from disk
        dataset = load_from_disk(input_path)
        cleaned_data = []
        for sample in dataset:
            # Get the text to be cleaned (missing field cleans to None → dropped)
            text = sample.get(text_field, '')
            cleaned_text = DatasetCleaner.clean_text(text, **clean_kwargs)
            if cleaned_text:
                # Copy the sample and substitute the cleaned text field
                new_sample = {k: v for k, v in sample.items() if k != text_field}
                new_sample[text_field] = cleaned_text
                cleaned_data.append(new_sample)

        # Materialize and persist the cleaned dataset
        cleaned_dataset = Dataset.from_list(cleaned_data)
        cleaned_dataset.save_to_disk(output_path)
        return len(cleaned_data), len(dataset)

    @staticmethod
    def auto_clean(input_dir, output_dir='data_clean', **clean_kwargs):
        """
        Automatically clean all datasets in the specified directory.

        Every subdirectory of input_dir (except ones already ending in '_clean')
        is treated as a dataset and cleaned into output_dir/<name>_clean.
        Already-existing outputs are skipped.

        :param input_dir: Input directory containing multiple datasets.
        :param output_dir: Output directory for the cleaned datasets. Default is 'data_clean'.
        :param clean_kwargs: Additional arguments passed to the clean_text method.
        :return: Returns True after cleaning is completed.
        :raises NotADirectoryError: If input_dir is not a directory.
        """
        if not os.path.isdir(input_dir):
            raise NotADirectoryError(f"Input path is not a directory: {input_dir}")

        # sorted() for a deterministic processing order across filesystems
        for dataset_name in sorted(os.listdir(input_dir)):
            input_path = os.path.join(input_dir, dataset_name)
            if os.path.isdir(input_path) and not dataset_name.endswith('_clean'):
                output_path = os.path.join(output_dir, f"{dataset_name}_clean")
                if not os.path.exists(output_path):
                    # Process the dataset and report the retention ratio
                    cleaned_count, total_count = DatasetCleaner.process_dataset(
                        input_path, output_path, **clean_kwargs
                    )
                    print(f"✅\tCleaning completed: {dataset_name} -> {dataset_name}_clean | Samples retained: {cleaned_count}/{total_count}")
                else:
                    print(f"✅\tCleaned dataset already exists: {output_path}, skipping processing")
        return True