import pandas as pd
import jieba
import re
from collections import Counter
import pycorrector


class DataLoader:
    """
    Preprocessing pipeline for a Chinese text dataset.

    Wraps a pandas DataFrame loaded from CSV and applies a chain of steps:
    token/character statistics, punctuation stripping, spelling correction
    (MacBERT4CSC via pycorrector), jieba word segmentation, stop-word removal
    and rare-word removal. Unknown attributes and item access are delegated
    to the wrapped DataFrame so the instance can be used like one.
    """

    def __init__(self, path: str, stop_words_path: str):
        """
        Load the dataset and supporting resources.

        :param path: path to the CSV dataset; must contain a 'sentence' column
        :param stop_words_path: path to the stop-word list (one word per line)
        """
        self.df = pd.read_csv(path)
        self.stopwords = self.load_chinese_stop_words(stop_words_path)
        # MacBERT4CSC-based Chinese spelling corrector.
        self.corrector = pycorrector.MacBertCorrector()

    def analyze(self):
        """
        Run the full preprocessing pipeline on the loaded data.

        Skip this call to use the instance purely as a utility class.

        :return: self, so calls can be chained fluently
        """
        self._preprocess_data()
        return self

    def _preprocess_data(self):
        """
        Preprocessing workflow: drop NaN rows, compute word/char statistics,
        clean, spell-correct, segment, then strip stop words and rare words.
        Results are stored as new DataFrame columns; the final text lands in
        'processed_text' and the rare-word set in ``self.rare_words``.
        """
        self.df = self.df.dropna()
        # Normalize the column once so '.str' accessors and jieba never see
        # non-string values (e.g. numeric-looking sentences read from CSV).
        # Done after dropna() so NaN cells are not turned into the string 'nan'.
        self.df['sentence'] = self.df['sentence'].astype(str)
        self.df['word_count'] = self.df['sentence'].apply(lambda x: len(jieba.lcut(x)))
        self.df['char_count'] = self.df['sentence'].str.len()
        self.df['avg_word_length'] = self.df['sentence'].apply(self.avg_word_length)
        self.df['text_clean'] = self.df['sentence'].apply(self.clean_chinese_text)
        self.df['text_corrected'] = self.df['text_clean'].apply(self._correct_spell_check)
        self.df['text_seg'] = self.df['text_corrected'].apply(self.chinese_segmentation)
        self.df['text_no_stop'] = self.df['text_seg'].apply(self._remove_stopwords)
        # Corpus-wide frequency count over the already segmented, stop-word-free text.
        all_words = []
        for text in self.df['text_no_stop']:
            all_words.extend(text.split())
        word_freq = Counter(all_words)
        # Words occurring at most twice in the whole corpus count as rare.
        rare_words = {word for word, count in word_freq.items() if count <= 2}
        self.rare_words = rare_words
        self.df['processed_text'] = self.df['text_no_stop'].apply(lambda x: self.remove_rare_words(x, rare_words))

    def _remove_stopwords(self, text: str):
        """
        Remove stop words (and single-character tokens) from segmented text.

        :param text: space-separated, already segmented text
        :return: space-joined text without stop words or 1-char tokens
        """
        words = text.split()
        return ' '.join([word for word in words if word not in self.stopwords and len(word) > 1])

    def _correct_spell_check(self, text: str):
        """
        Spell-check the text with the pycorrector MacBERT model.

        NOTE: character-substitution errors are corrected, but transposed
        (reordered) characters are not — which Chinese readers rarely notice
        anyway. Best-effort: on any model failure the input is returned
        unchanged so the pipeline keeps running.

        :param text: input text
        :return: corrected text (or the original text on failure)
        """
        try:
            return self.corrector.correct(text)['target']
        except Exception as e:
            print(f'纠错失败{e}')
            return text

    @staticmethod
    def remove_rare_words(text: str, rare_words: set):
        """
        Remove rare words from segmented text.

        :param text: space-separated, already segmented text
        :param rare_words: rare-word set built from corpus frequency counts
        :return: space-joined text with rare words removed
        """
        words = text.split()
        return ' '.join([word for word in words if word not in rare_words])

    @staticmethod
    def chinese_segmentation(text: str):
        """
        Segment Chinese text with jieba (precise mode), space-delimited.

        :param text: unsegmented text
        :return: segmented text with tokens separated by single spaces
        """
        return ' '.join(jieba.lcut(text))

    @staticmethod
    def clean_chinese_text(text: str):
        """
        Strip punctuation and special characters via regex, keeping Chinese
        characters, ASCII letters, digits and whitespace; collapse runs of
        whitespace to a single space and trim the ends.

        :param text: raw text
        :return: cleaned text
        """
        text = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9\s]', '', text)
        text = re.sub(r'\s+', ' ', text).strip()
        return text

    @staticmethod
    def avg_word_length(text: str):
        """
        Compute the average token length of the text after jieba segmentation.

        :param text: input text
        :return: mean token length, or 0 for text that yields no tokens
        """
        words = jieba.lcut(text)
        if len(words) == 0:
            return 0
        return sum(len(word) for word in words) / len(words)

    @staticmethod
    def load_chinese_stop_words(stop_words_path: str):
        """
        Load the stop-word list from disk (one word per line, UTF-8).

        Missing files produce a warning and an empty set rather than an
        exception, so the pipeline can still run without stop-word removal.

        :param stop_words_path: path to the local stop-word file
        :return: set of stop words
        """
        base_stopwords = set()
        try:
            with open(stop_words_path, 'r', encoding='utf-8') as f:
                # Skip blank lines so the empty string never enters the set.
                custom_stopwords = {line.strip() for line in f if line.strip()}
            base_stopwords.update(custom_stopwords)
        except FileNotFoundError:
            print(f"警告: 未找到停用词文件 {stop_words_path}")
        return base_stopwords

    def __getitem__(self, key):
        """
        Allow direct indexing into the wrapped DataFrame.

        Tuples containing a slice go through positional ``iloc``; plain
        tuples select a column list; a string selects one column; anything
        else is positional.

        :param key: column name, tuple of names, or positional index/slice
        :return: the selected DataFrame element(s)
        """
        if isinstance(key, tuple):
            if any(isinstance(k, slice) for k in key):
                return self.df.iloc[key]
            else:
                return self.df[list(key)]
        else:
            if isinstance(key, str):
                return self.df[key]
            else:
                return self.df.iloc[key]

    def __getattr__(self, item):
        """
        Delegate unknown attribute access to the wrapped DataFrame.

        Looks up ``df`` via ``__dict__`` to avoid infinite recursion when
        ``df`` itself is not yet set (e.g. during unpickling, or if
        ``__init__`` failed before assigning it).

        :param item: attribute name
        :return: the DataFrame attribute (method or property)
        :raises AttributeError: if neither the instance nor the DataFrame has it
        """
        df = self.__dict__.get('df')
        if df is not None and hasattr(df, item):
            return getattr(df, item)
        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")


if __name__ == '__main__':
    # Paths for the raw dataset, stop-word list and preprocessed output.
    train_path = './data/train.csv'
    stopwords_path = './data/HITstopwords.txt'
    output_path = './data/train_preprocessed.csv'

    # Build the loader, run the full preprocessing pipeline, then persist.
    loader = DataLoader(train_path, stopwords_path)
    dataset = loader.analyze()
    print(dataset.head())
    dataset.to_csv(output_path, index=False)
