"""
数据清洗模块
用于清洗和预处理采集到的关键词数据
"""
import pandas as pd
import numpy as np
import re
import jieba
import jieba.analyse
from collections import Counter
import logging
from typing import List, Set, Dict, Tuple
from textblob import TextBlob
from spellchecker import SpellChecker


class KeywordCleaner:
    """关键词数据清洗器"""
    
    def __init__(self, config: Dict):
        """
        初始化清洗器
        
        Args:
            config: 配置字典
        """
        self.config = config
        self.cleaning_config = config.get('data_cleaning', {})
        self.logger = logging.getLogger(__name__)
        
        # 初始化中文分词
        jieba.initialize()
        
        # 初始化拼写检查器
        self.spell_checker = SpellChecker(language='en')
        
        # 加载停用词
        self.stopwords = self._load_stopwords()
        
        # 编译正则表达式
        self._compile_regex_patterns()
    
    def _load_stopwords(self) -> Set[str]:
        """
        加载停用词
        
        Returns:
            停用词集合
        """
        stopwords = set()
        
        stopword_config = self.cleaning_config.get('stopwords', {})
        
        # 加载默认停用词
        if stopword_config.get('use_default', True):
            default_stopwords = {
                '的', '是', '了', '在', '有', '和', '就', '不', '人', '都', '一', '一个',
                '我', '你', '他', '她', '它', '我们', '你们', '他们', '这', '那', '这个',
                '那个', '上', '下', '来', '去', '出', '也', '还', '要', '可以', '能',
                '会', '说', '看', '知道', '时候', '什么', '怎么', '为什么', '哪里'
            }
            stopwords.update(default_stopwords)
        
        # 加载自定义停用词
        custom_stopwords = stopword_config.get('custom_stopwords', [])
        stopwords.update(custom_stopwords)
        
        self.logger.info(f"加载了 {len(stopwords)} 个停用词")
        return stopwords
    
    def _compile_regex_patterns(self):
        """编译正则表达式模式"""
        self.patterns = {
            'special_chars': re.compile(r'[^\w\s\u4e00-\u9fff]'),  # 特殊字符
            'numbers_only': re.compile(r'^\d+$'),  # 纯数字
            'english_only': re.compile(r'^[a-zA-Z\s]+$'),  # 纯英文
            'whitespace': re.compile(r'\s+'),  # 多个空白字符
            'html_tags': re.compile(r'<[^>]+>'),  # HTML标签
            'urls': re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'),  # URL
            'email': re.compile(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b')  # 邮箱
        }
    
    def basic_cleaning(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        基本数据清洗
        
        Args:
            df: 原始数据DataFrame
        
        Returns:
            清洗后的DataFrame
        """
        self.logger.info("开始基本数据清洗...")
        
        original_count = len(df)
        
        # 复制数据
        cleaned_df = df.copy()
        
        # 去除空值
        if self.cleaning_config.get('basic_cleaning', {}).get('remove_empty', True):
            cleaned_df = cleaned_df.dropna(subset=['keyword'])
            cleaned_df = cleaned_df[cleaned_df['keyword'].str.strip() != '']
        
        # 去除重复
        if self.cleaning_config.get('basic_cleaning', {}).get('remove_duplicates', True):
            cleaned_df = cleaned_df.drop_duplicates(subset=['keyword'])
        
        # 长度过滤
        basic_config = self.cleaning_config.get('basic_cleaning', {})
        min_length = basic_config.get('min_length', 2)
        max_length = basic_config.get('max_length', 50)
        
        cleaned_df = cleaned_df[
            (cleaned_df['keyword'].str.len() >= min_length) &
            (cleaned_df['keyword'].str.len() <= max_length)
        ]
        
        # 重置索引
        cleaned_df = cleaned_df.reset_index(drop=True)
        
        self.logger.info(f"基本清洗完成，从 {original_count} 条记录减少到 {len(cleaned_df)} 条")
        
        return cleaned_df
    
    def text_cleaning(self, text: str) -> str:
        """
        文本清洗
        
        Args:
            text: 原始文本
        
        Returns:
            清洗后的文本
        """
        if not isinstance(text, str):
            return ""
        
        # 去除HTML标签
        text = self.patterns['html_tags'].sub('', text)
        
        # 去除URL和邮箱
        text = self.patterns['urls'].sub('', text)
        text = self.patterns['email'].sub('', text)
        
        # 去除特殊字符（根据配置）
        filter_rules = self.cleaning_config.get('filter_rules', {})
        if filter_rules.get('remove_special_chars', True):
            text = self.patterns['special_chars'].sub(' ', text)
        
        # 标准化空白字符
        text = self.patterns['whitespace'].sub(' ', text)
        
        # 去除首尾空白
        text = text.strip()
        
        return text
    
    def filter_keywords(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        过滤关键词
        
        Args:
            df: 数据DataFrame
        
        Returns:
            过滤后的DataFrame
        """
        self.logger.info("开始过滤关键词...")
        
        original_count = len(df)
        filtered_df = df.copy()
        
        filter_rules = self.cleaning_config.get('filter_rules', {})
        
        # 过滤纯数字关键词
        if filter_rules.get('remove_numbers_only', True):
            mask = ~filtered_df['keyword'].str.match(self.patterns['numbers_only'])
            filtered_df = filtered_df[mask]
        
        # 过滤纯英文关键词（可选）
        if filter_rules.get('remove_english_only', False):
            mask = ~filtered_df['keyword'].str.match(self.patterns['english_only'])
            filtered_df = filtered_df[mask]
        
        # 过滤停用词
        mask = ~filtered_df['keyword'].isin(self.stopwords)
        filtered_df = filtered_df[mask]
        
        # 重置索引
        filtered_df = filtered_df.reset_index(drop=True)
        
        self.logger.info(f"关键词过滤完成，从 {original_count} 条记录减少到 {len(filtered_df)} 条")
        
        return filtered_df
    
    def extract_features(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        提取关键词特征
        
        Args:
            df: 数据DataFrame
        
        Returns:
            添加特征后的DataFrame
        """
        self.logger.info("开始提取关键词特征...")
        
        feature_df = df.copy()
        
        # 基本特征
        feature_df['length'] = feature_df['keyword'].str.len()
        feature_df['word_count'] = feature_df['keyword'].str.split().str.len()
        feature_df['chinese_char_count'] = feature_df['keyword'].apply(
            lambda x: len(re.findall(r'[\u4e00-\u9fff]', x))
        )
        feature_df['english_char_count'] = feature_df['keyword'].apply(
            lambda x: len(re.findall(r'[a-zA-Z]', x))
        )
        feature_df['digit_count'] = feature_df['keyword'].apply(
            lambda x: len(re.findall(r'\d', x))
        )
        
        # 分词特征
        feature_df['jieba_words'] = feature_df['keyword'].apply(
            lambda x: list(jieba.cut(x, cut_all=False))
        )
        feature_df['jieba_word_count'] = feature_df['jieba_words'].apply(len)
        
        # TF-IDF关键词提取
        feature_df['tfidf_keywords'] = feature_df['keyword'].apply(
            lambda x: jieba.analyse.extract_tags(x, topK=5, withWeight=False)
        )
        
        # 语言检测（简单版本）
        feature_df['is_chinese'] = feature_df['chinese_char_count'] > 0
        feature_df['is_english'] = feature_df['english_char_count'] > 0
        feature_df['is_mixed'] = feature_df['is_chinese'] & feature_df['is_english']
        
        # 计算关键词复杂度
        feature_df['complexity_score'] = (
            feature_df['word_count'] * 0.3 +
            feature_df['chinese_char_count'] * 0.4 +
            feature_df['english_char_count'] * 0.2 +
            feature_df['digit_count'] * 0.1
        )
        
        self.logger.info("关键词特征提取完成")
        
        return feature_df
    
    def detect_duplicates_fuzzy(self, df: pd.DataFrame, similarity_threshold: float = 0.8) -> pd.DataFrame:
        """
        模糊去重
        
        Args:
            df: 数据DataFrame
            similarity_threshold: 相似度阈值
        
        Returns:
            去重后的DataFrame
        """
        self.logger.info("开始模糊去重...")
        
        from difflib import SequenceMatcher
        
        def similar(a, b):
            return SequenceMatcher(None, a, b).ratio()
        
        # 创建结果列表
        unique_keywords = []
        processed_keywords = set()
        
        for idx, row in df.iterrows():
            keyword = row['keyword']
            
            if keyword in processed_keywords:
                continue
            
            # 检查是否与已有关键词相似
            is_similar = False
            for unique_keyword in unique_keywords:
                if similar(keyword, unique_keyword['keyword']) > similarity_threshold:
                    is_similar = True
                    break
            
            if not is_similar:
                unique_keywords.append(row.to_dict())
                processed_keywords.add(keyword)
        
        result_df = pd.DataFrame(unique_keywords)
        
        self.logger.info(f"模糊去重完成，从 {len(df)} 条记录减少到 {len(result_df)} 条")
        
        return result_df
    
    def clean_keywords(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        完整的关键词清洗流程
        
        Args:
            df: 原始数据DataFrame
        
        Returns:
            清洗后的DataFrame
        """
        self.logger.info("开始关键词数据清洗流程...")
        
        # 1. 基本清洗
        cleaned_df = self.basic_cleaning(df)
        
        # 2. 文本清洗
        cleaned_df['keyword'] = cleaned_df['keyword'].apply(self.text_cleaning)
        
        # 3. 再次基本清洗（清理空值）
        cleaned_df = cleaned_df[cleaned_df['keyword'] != '']
        cleaned_df = cleaned_df.reset_index(drop=True)
        
        # 4. 关键词过滤
        cleaned_df = self.filter_keywords(cleaned_df)
        
        # 5. 特征提取
        cleaned_df = self.extract_features(cleaned_df)
        
        # 6. 模糊去重
        cleaned_df = self.detect_duplicates_fuzzy(cleaned_df)
        
        # 7. 最终整理
        cleaned_df = cleaned_df.reset_index(drop=True)
        cleaned_df['id'] = range(1, len(cleaned_df) + 1)
        cleaned_df['cleaned_time'] = pd.Timestamp.now()
        
        self.logger.info(f"关键词清洗流程完成，最终得到 {len(cleaned_df)} 个清洗后的关键词")
        
        return cleaned_df
    
    def generate_cleaning_report(self, original_df: pd.DataFrame, cleaned_df: pd.DataFrame) -> Dict:
        """
        生成清洗报告
        
        Args:
            original_df: 原始数据
            cleaned_df: 清洗后数据
        
        Returns:
            清洗报告字典
        """
        report = {
            'original_count': len(original_df),
            'cleaned_count': len(cleaned_df),
            'removed_count': len(original_df) - len(cleaned_df),
            'removal_rate': (len(original_df) - len(cleaned_df)) / len(original_df) * 100,
            'avg_length_original': original_df['keyword'].str.len().mean(),
            'avg_length_cleaned': cleaned_df['length'].mean(),
            'chinese_keywords': cleaned_df['is_chinese'].sum(),
            'english_keywords': cleaned_df['is_english'].sum(),
            'mixed_keywords': cleaned_df['is_mixed'].sum(),
            'top_keywords': cleaned_df.nlargest(10, 'complexity_score')['keyword'].tolist()
        }
        
        return report


def main():
    """Manual test driver: clean the collected keyword CSV and print a report."""
    from utils import load_config, setup_logging

    # Bootstrap configuration and logging, then build the cleaner.
    config = load_config()
    setup_logging(config)
    cleaner = KeywordCleaner(config)

    try:
        df = pd.read_csv('data/raw/collected_keywords.csv')
        print(f"读取到 {len(df)} 个原始关键词")

        cleaned_df = cleaner.clean_keywords(df)

        # Print the scalar report entries; top_keywords is a list, skip it.
        report = cleaner.generate_cleaning_report(df, cleaned_df)
        print("清洗报告:")
        for key, value in report.items():
            if key == 'top_keywords':
                continue
            print(f"  {key}: {value}")

        # Persist with a BOM so spreadsheet tools open the UTF-8 CSV correctly.
        cleaned_df.to_csv('data/cleaned/cleaned_keywords.csv', index=False, encoding='utf-8-sig')

        print(f"清洗完成，共得到 {len(cleaned_df)} 个清洗后的关键词")

    except FileNotFoundError:
        print("未找到原始数据文件，请先运行数据采集")


if __name__ == "__main__":
    main()
